diff --git a/apis/alb/v1alpha1/zz_backendgroup_terraformed.go b/apis/alb/v1alpha1/zz_backendgroup_terraformed.go
index 99cf339..b2491da 100755
--- a/apis/alb/v1alpha1/zz_backendgroup_terraformed.go
+++ b/apis/alb/v1alpha1/zz_backendgroup_terraformed.go
@@ -1,5 +1,3 @@
-
-
// Code generated by upjet. DO NOT EDIT.
package v1alpha1
@@ -10,120 +8,118 @@ import (
"github.com/crossplane/upjet/pkg/resource"
"github.com/crossplane/upjet/pkg/resource/json"
-
)
// GetTerraformResourceType returns Terraform resource type for this BackendGroup
func (mg *BackendGroup) GetTerraformResourceType() string {
- return "yandex_alb_backend_group"
+ return "yandex_alb_backend_group"
}
// GetConnectionDetailsMapping for this BackendGroup
func (tr *BackendGroup) GetConnectionDetailsMapping() map[string]string {
- return nil
+ return nil
}
// GetObservation of this BackendGroup
func (tr *BackendGroup) GetObservation() (map[string]any, error) {
- o, err := json.TFParser.Marshal(tr.Status.AtProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(o, &base)
+ o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(o, &base)
}
// SetObservation for this BackendGroup
func (tr *BackendGroup) SetObservation(obs map[string]any) error {
- p, err := json.TFParser.Marshal(obs)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+ p, err := json.TFParser.Marshal(obs)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
}
// GetID returns ID of underlying Terraform resource of this BackendGroup
func (tr *BackendGroup) GetID() string {
- if tr.Status.AtProvider.ID == nil {
- return ""
- }
- return *tr.Status.AtProvider.ID
+ if tr.Status.AtProvider.ID == nil {
+ return ""
+ }
+ return *tr.Status.AtProvider.ID
}
// GetParameters of this BackendGroup
func (tr *BackendGroup) GetParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
}
// SetParameters for this BackendGroup
func (tr *BackendGroup) SetParameters(params map[string]any) error {
- p, err := json.TFParser.Marshal(params)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}
// GetInitParameters of this BackendGroup
func (tr *BackendGroup) GetInitParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
}
// GetMergedParameters of this BackendGroup
func (tr *BackendGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
- params, err := tr.GetParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
- }
- if !shouldMergeInitProvider {
- return params, nil
- }
-
- initParams, err := tr.GetInitParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
- }
-
- // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
- // slices from the initProvider to forProvider. As it also sets
- // overwrite to true, we need to set it back to false, we don't
- // want to overwrite the forProvider fields with the initProvider
- // fields.
- err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
- c.Overwrite = false
- })
- if err != nil {
- return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
- }
-
- return params, nil
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
}
// LateInitialize this BackendGroup using its observed tfState.
// returns True if there are any spec changes for the resource.
func (tr *BackendGroup) LateInitialize(attrs []byte) (bool, error) {
- params := &BackendGroupParameters{}
- if err := json.TFParser.Unmarshal(attrs, params); err != nil {
- return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
- }
- opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
- li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params)
+ params := &BackendGroupParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params)
}
// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *BackendGroup) GetTerraformSchemaVersion() int {
- return 0
+ return 0
}
diff --git a/apis/alb/v1alpha1/zz_backendgroup_types.go b/apis/alb/v1alpha1/zz_backendgroup_types.go
index 5ac127f..1d0a6d0 100755
--- a/apis/alb/v1alpha1/zz_backendgroup_types.go
+++ b/apis/alb/v1alpha1/zz_backendgroup_types.go
@@ -1,5 +1,3 @@
-
-
// Code generated by upjet. DO NOT EDIT.
package v1alpha1
@@ -9,1617 +7,1438 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
)
-
-
-
type BackendGroupInitParameters struct {
+ // Description of the backend group.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Description of the backend group. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Grpc backend specification that will be used by the ALB Backend Group. Structure is documented below. + GRPCBackend []GRPCBackendInitParameters `json:"grpcBackend,omitempty" tf:"grpc_backend,omitempty"` -// Grpc backend specification that will be used by the ALB Backend Group. Structure is documented below. -GRPCBackend []GRPCBackendInitParameters `json:"grpcBackend,omitempty" tf:"grpc_backend,omitempty"` + // Http backend specification that will be used by the ALB Backend Group. Structure is documented below. + HTTPBackend []HTTPBackendInitParameters `json:"httpBackend,omitempty" tf:"http_backend,omitempty"` -// Http backend specification that will be used by the ALB Backend Group. Structure is documented below. -HTTPBackend []HTTPBackendInitParameters `json:"httpBackend,omitempty" tf:"http_backend,omitempty"` + // Labels to assign to this backend group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Labels to assign to this backend group. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Name of the Backend Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the Backend Group. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Session affinity mode determines how incoming requests are grouped into one session. Structure is documented below. + SessionAffinity []SessionAffinityInitParameters `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` -// Session affinity mode determines how incoming requests are grouped into one session. Structure is documented below. -SessionAffinity []SessionAffinityInitParameters `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` - -// Stream backend specification that will be used by the ALB Backend Group. Structure is documented below. -StreamBackend []StreamBackendInitParameters `json:"streamBackend,omitempty" tf:"stream_backend,omitempty"` + // Stream backend specification that will be used by the ALB Backend Group. 
Structure is documented below. + StreamBackend []StreamBackendInitParameters `json:"streamBackend,omitempty" tf:"stream_backend,omitempty"` } - type BackendGroupObservation struct { + // The backend group creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// The backend group creation timestamp. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` - -// Description of the backend group. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the backend group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Grpc backend specification that will be used by the ALB Backend Group. Structure is documented below. -GRPCBackend []GRPCBackendObservation `json:"grpcBackend,omitempty" tf:"grpc_backend,omitempty"` + // Grpc backend specification that will be used by the ALB Backend Group. Structure is documented below. + GRPCBackend []GRPCBackendObservation `json:"grpcBackend,omitempty" tf:"grpc_backend,omitempty"` -// Http backend specification that will be used by the ALB Backend Group. Structure is documented below. -HTTPBackend []HTTPBackendObservation `json:"httpBackend,omitempty" tf:"http_backend,omitempty"` + // Http backend specification that will be used by the ALB Backend Group. Structure is documented below. + HTTPBackend []HTTPBackendObservation `json:"httpBackend,omitempty" tf:"http_backend,omitempty"` -// The ID of the backend group. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The ID of the backend group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Labels to assign to this backend group. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this backend group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the Backend Group. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Backend Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Session affinity mode determines how incoming requests are grouped into one session. Structure is documented below. -SessionAffinity []SessionAffinityObservation `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Session affinity mode determines how incoming requests are grouped into one session. Structure is documented below. + SessionAffinity []SessionAffinityObservation `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` -// Stream backend specification that will be used by the ALB Backend Group. Structure is documented below. -StreamBackend []StreamBackendObservation `json:"streamBackend,omitempty" tf:"stream_backend,omitempty"` + // Stream backend specification that will be used by the ALB Backend Group. Structure is documented below. + StreamBackend []StreamBackendObservation `json:"streamBackend,omitempty" tf:"stream_backend,omitempty"` } - type BackendGroupParameters struct { + // Description of the backend group. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Description of the backend group. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Grpc backend specification that will be used by the ALB Backend Group. Structure is documented below. -// +kubebuilder:validation:Optional -GRPCBackend []GRPCBackendParameters `json:"grpcBackend,omitempty" tf:"grpc_backend,omitempty"` + // Grpc backend specification that will be used by the ALB Backend Group. Structure is documented below. + // +kubebuilder:validation:Optional + GRPCBackend []GRPCBackendParameters `json:"grpcBackend,omitempty" tf:"grpc_backend,omitempty"` -// Http backend specification that will be used by the ALB Backend Group. Structure is documented below. -// +kubebuilder:validation:Optional -HTTPBackend []HTTPBackendParameters `json:"httpBackend,omitempty" tf:"http_backend,omitempty"` + // Http backend specification that will be used by the ALB Backend Group. Structure is documented below. + // +kubebuilder:validation:Optional + HTTPBackend []HTTPBackendParameters `json:"httpBackend,omitempty" tf:"http_backend,omitempty"` -// Labels to assign to this backend group. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this backend group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the Backend Group. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Backend Group. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Session affinity mode determines how incoming requests are grouped into one session. Structure is documented below. 
-// +kubebuilder:validation:Optional -SessionAffinity []SessionAffinityParameters `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + // Session affinity mode determines how incoming requests are grouped into one session. Structure is documented below. + // +kubebuilder:validation:Optional + SessionAffinity []SessionAffinityParameters `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` -// Stream backend specification that will be used by the ALB Backend Group. Structure is documented below. -// +kubebuilder:validation:Optional -StreamBackend []StreamBackendParameters `json:"streamBackend,omitempty" tf:"stream_backend,omitempty"` + // Stream backend specification that will be used by the ALB Backend Group. Structure is documented below. + // +kubebuilder:validation:Optional + StreamBackend []StreamBackendParameters `json:"streamBackend,omitempty" tf:"stream_backend,omitempty"` } - type ConnectionInitParameters struct { - -// Source IP address to use with affinity. -// Use source IP address -SourceIP *bool `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // Source IP address to use with affinity. + // Use source IP address + SourceIP *bool `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type ConnectionObservation struct { - -// Source IP address to use with affinity. -// Use source IP address -SourceIP *bool `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // Source IP address to use with affinity. + // Use source IP address + SourceIP *bool `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type ConnectionParameters struct { - -// Source IP address to use with affinity. -// Use source IP address -// +kubebuilder:validation:Optional -SourceIP *bool `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // Source IP address to use with affinity. + // Use source IP address + // +kubebuilder:validation:Optional + SourceIP *bool `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type CookieInitParameters struct { + // Name of the backend. + // Name of the HTTP cookie + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the backend. -// Name of the HTTP cookie -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// TTL for the cookie (if not set, session cookie will be used) -// TTL for the cookie (if not set, session cookie will be used) -TTL *string `json:"ttl,omitempty" tf:"ttl,omitempty"` + // TTL for the cookie (if not set, session cookie will be used) + // TTL for the cookie (if not set, session cookie will be used) + TTL *string `json:"ttl,omitempty" tf:"ttl,omitempty"` } - type CookieObservation struct { + // Name of the backend. + // Name of the HTTP cookie + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the backend. -// Name of the HTTP cookie -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// TTL for the cookie (if not set, session cookie will be used) -// TTL for the cookie (if not set, session cookie will be used) -TTL *string `json:"ttl,omitempty" tf:"ttl,omitempty"` + // TTL for the cookie (if not set, session cookie will be used) + // TTL for the cookie (if not set, session cookie will be used) + TTL *string `json:"ttl,omitempty" tf:"ttl,omitempty"` } - type CookieParameters struct { + // Name of the backend. + // Name of the HTTP cookie + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Name of the backend. 
-// Name of the HTTP cookie -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` - -// TTL for the cookie (if not set, session cookie will be used) -// TTL for the cookie (if not set, session cookie will be used) -// +kubebuilder:validation:Optional -TTL *string `json:"ttl,omitempty" tf:"ttl,omitempty"` + // TTL for the cookie (if not set, session cookie will be used) + // TTL for the cookie (if not set, session cookie will be used) + // +kubebuilder:validation:Optional + TTL *string `json:"ttl,omitempty" tf:"ttl,omitempty"` } - type GRPCBackendInitParameters struct { + // Healthcheck specification that will be used by this backend. Structure is documented below. + Healthcheck []HealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -Healthcheck []HealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -LoadBalancingConfig []LoadBalancingConfigInitParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + LoadBalancingConfig []LoadBalancingConfigInitParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Name of the backend. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the backend. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -TLS []TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + TLS []TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` -// References target groups for the backend. -// +crossplane:generate:reference:type=TargetGroup -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References target groups for the backend. + // +crossplane:generate:reference:type=TargetGroup + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// References to TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` + // References to TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. 
Traffic will be split between backends of the same BackendGroup according to their weights. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type GRPCBackendObservation struct { + // Healthcheck specification that will be used by this backend. Structure is documented below. + Healthcheck []HealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -Healthcheck []HealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + LoadBalancingConfig []LoadBalancingConfigObservation `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -LoadBalancingConfig []LoadBalancingConfigObservation `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Name of the backend. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the backend. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + TLS []TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -TLS []TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + // References target groups for the backend. + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// References target groups for the backend. -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` - -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type GRPCBackendParameters struct { + // Healthcheck specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + Healthcheck []HealthcheckParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -Healthcheck []HealthcheckParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -LoadBalancingConfig []LoadBalancingConfigParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + LoadBalancingConfig []LoadBalancingConfigParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Name of the backend. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Name of the backend. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Port for incoming traffic. -// +kubebuilder:validation:Optional -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port for incoming traffic. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -TLS []TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + TLS []TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` -// References target groups for the backend. -// +crossplane:generate:reference:type=TargetGroup -// +kubebuilder:validation:Optional -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References target groups for the backend. + // +crossplane:generate:reference:type=TargetGroup + // +kubebuilder:validation:Optional + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// References to TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` + // References to TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -// +kubebuilder:validation:Optional -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type GRPCHealthcheckInitParameters struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type GRPCHealthcheckObservation struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type GRPCHealthcheckParameters struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -// +kubebuilder:validation:Optional -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. 
+ // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type HTTPBackendHealthcheckInitParameters struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + GRPCHealthcheck []HealthcheckGRPCHealthcheckInitParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -GRPCHealthcheck []HealthcheckGRPCHealthcheckInitParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` - -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -HTTPHealthcheck []HealthcheckHTTPHealthcheckInitParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + HTTPHealthcheck []HealthcheckHTTPHealthcheckInitParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Optional alternative port for health checking. -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Optional alternative port for health checking. + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Interval between health checks. -Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + // Interval between health checks. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -StreamHealthcheck []HealthcheckStreamHealthcheckInitParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + StreamHealthcheck []HealthcheckStreamHealthcheckInitParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Time to wait for a health check response. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Time to wait for a health check response. 
+ Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HTTPBackendHealthcheckObservation struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + GRPCHealthcheck []HealthcheckGRPCHealthcheckObservation `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -GRPCHealthcheck []HealthcheckGRPCHealthcheckObservation `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + HTTPHealthcheck []HealthcheckHTTPHealthcheckObservation `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -HTTPHealthcheck []HealthcheckHTTPHealthcheckObservation `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Optional alternative port for health checking. + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Optional alternative port for health checking. -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Interval between health checks. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` -// Interval between health checks. -Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. 
+ StreamHealthcheck []HealthcheckStreamHealthcheckObservation `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -StreamHealthcheck []HealthcheckStreamHealthcheckObservation `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Time to wait for a health check response. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Time to wait for a health check response. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` - -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HTTPBackendHealthcheckParameters struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + GRPCHealthcheck []HealthcheckGRPCHealthcheckParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -GRPCHealthcheck []HealthcheckGRPCHealthcheckParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` - -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -HTTPHealthcheck []HealthcheckHTTPHealthcheckParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + HTTPHealthcheck []HealthcheckHTTPHealthcheckParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Optional alternative port for health checking. -// +kubebuilder:validation:Optional -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Optional alternative port for health checking. + // +kubebuilder:validation:Optional + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -// +kubebuilder:validation:Optional -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Interval between health checks. -// +kubebuilder:validation:Optional -Interval *string `json:"interval" tf:"interval,omitempty"` + // Interval between health checks. 
+ // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -// +kubebuilder:validation:Optional -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + // +kubebuilder:validation:Optional + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -StreamHealthcheck []HealthcheckStreamHealthcheckParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + StreamHealthcheck []HealthcheckStreamHealthcheckParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Time to wait for a health check response. -// +kubebuilder:validation:Optional -Timeout *string `json:"timeout" tf:"timeout,omitempty"` + // Time to wait for a health check response. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout" tf:"timeout,omitempty"` -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -// +kubebuilder:validation:Optional -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HTTPBackendInitParameters struct { + // Healthcheck specification that will be used by this backend. Structure is documented below. + Healthcheck []HTTPBackendHealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -Healthcheck []HTTPBackendHealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// Enables HTTP2 for upstream requests. If not set, HTTP 1.1 will be used by default. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // Enables HTTP2 for upstream requests. If not set, HTTP 1.1 will be used by default. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -LoadBalancingConfig []HTTPBackendLoadBalancingConfigInitParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. 
+ LoadBalancingConfig []HTTPBackendLoadBalancingConfigInitParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Name of the backend. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the backend. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -StorageBucket *string `json:"storageBucket,omitempty" tf:"storage_bucket,omitempty"` + StorageBucket *string `json:"storageBucket,omitempty" tf:"storage_bucket,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -TLS []HTTPBackendTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + TLS []HTTPBackendTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` -// References target groups for the backend. -// +crossplane:generate:reference:type=TargetGroup -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References target groups for the backend. + // +crossplane:generate:reference:type=TargetGroup + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// References to TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` + // References to TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type HTTPBackendLoadBalancingConfigInitParameters struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". 
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` - -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type HTTPBackendLoadBalancingConfigObservation struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` - -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type HTTPBackendLoadBalancingConfigParameters struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. 
+ // +kubebuilder:validation:Optional + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -// +kubebuilder:validation:Optional -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` - -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -// +kubebuilder:validation:Optional -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + // +kubebuilder:validation:Optional + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -// +kubebuilder:validation:Optional -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + // +kubebuilder:validation:Optional + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type HTTPBackendObservation struct { + // Healthcheck specification that will be used by this backend. Structure is documented below. + Healthcheck []HTTPBackendHealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -Healthcheck []HTTPBackendHealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// Enables HTTP2 for upstream requests. If not set, HTTP 1.1 will be used by default. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // Enables HTTP2 for upstream requests. If not set, HTTP 1.1 will be used by default. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -LoadBalancingConfig []HTTPBackendLoadBalancingConfigObservation `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + LoadBalancingConfig []HTTPBackendLoadBalancingConfigObservation `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Name of the backend. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the backend. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -StorageBucket *string `json:"storageBucket,omitempty" tf:"storage_bucket,omitempty"` + StorageBucket *string `json:"storageBucket,omitempty" tf:"storage_bucket,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -TLS []HTTPBackendTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + TLS []HTTPBackendTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` -// References target groups for the backend. -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References target groups for the backend. + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type HTTPBackendParameters struct { + // Healthcheck specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + Healthcheck []HTTPBackendHealthcheckParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -Healthcheck []HTTPBackendHealthcheckParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` + // Enables HTTP2 for upstream requests. If not set, HTTP 1.1 will be used by default. + // +kubebuilder:validation:Optional + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// Enables HTTP2 for upstream requests. If not set, HTTP 1.1 will be used by default. -// +kubebuilder:validation:Optional -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + LoadBalancingConfig []HTTPBackendLoadBalancingConfigParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -LoadBalancingConfig []HTTPBackendLoadBalancingConfigParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Name of the backend. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Name of the backend. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Port for incoming traffic. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Port for incoming traffic. 
-// +kubebuilder:validation:Optional -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // +kubebuilder:validation:Optional + StorageBucket *string `json:"storageBucket,omitempty" tf:"storage_bucket,omitempty"` -// +kubebuilder:validation:Optional -StorageBucket *string `json:"storageBucket,omitempty" tf:"storage_bucket,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + TLS []HTTPBackendTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -TLS []HTTPBackendTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // References target groups for the backend. + // +crossplane:generate:reference:type=TargetGroup + // +kubebuilder:validation:Optional + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// References target groups for the backend. -// +crossplane:generate:reference:type=TargetGroup -// +kubebuilder:validation:Optional -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References to TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` -// References to TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` + // Selector for a list of TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` -// Selector for a list of TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` - -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -// +kubebuilder:validation:Optional -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type HTTPBackendTLSInitParameters struct { + // SNI string for TLS connections. + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -ValidationContext []TLSValidationContextInitParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + ValidationContext []TLSValidationContextInitParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type HTTPBackendTLSObservation struct { + // SNI string for TLS connections. + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -ValidationContext []TLSValidationContextObservation `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + ValidationContext []TLSValidationContextObservation `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type HTTPBackendTLSParameters struct { + // SNI string for TLS connections. 
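// A minimal editorial sketch (not part of the generated file) of the
// reference fields documented above: rather than hard-coding
// targetGroupIds, a backend can name a TargetGroup via targetGroupIdsRefs
// and let the reference be resolved at runtime. Assumes this sits in the
// same package and that ptr is k8s.io/utils/ptr; "example-tg" is an
// illustrative object name.
func exampleHTTPBackend() HTTPBackendParameters {
	return HTTPBackendParameters{
		Name:               ptr.To("http-backend"),
		Port:               ptr.To(80.0), // ports are float64 in the generated schema
		TargetGroupIdsRefs: []v1.Reference{{Name: "example-tg"}},
	}
}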
+ // +kubebuilder:validation:Optional + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -// +kubebuilder:validation:Optional -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -// +kubebuilder:validation:Optional -ValidationContext []TLSValidationContextParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + // +kubebuilder:validation:Optional + ValidationContext []TLSValidationContextParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type HTTPHealthcheckInitParameters struct { + // "Host" HTTP header value. + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -Host *string `json:"host,omitempty" tf:"host,omitempty"` - -// If set, health checks will use HTTP2. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // If set, health checks will use HTTP2. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// HTTP path. -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // HTTP path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` } - type HTTPHealthcheckObservation struct { + // "Host" HTTP header value. + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -Host *string `json:"host,omitempty" tf:"host,omitempty"` - -// If set, health checks will use HTTP2. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // If set, health checks will use HTTP2. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// HTTP path. -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // HTTP path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` } - type HTTPHealthcheckParameters struct { + // "Host" HTTP header value. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -// +kubebuilder:validation:Optional -Host *string `json:"host,omitempty" tf:"host,omitempty"` + // If set, health checks will use HTTP2. + // +kubebuilder:validation:Optional + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// If set, health checks will use HTTP2. -// +kubebuilder:validation:Optional -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` - -// HTTP path. -// +kubebuilder:validation:Optional -Path *string `json:"path" tf:"path,omitempty"` + // HTTP path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` } - type HeaderInitParameters struct { - -// The name of the request header that will be used with affinity. -// The name of the request header that will be used -HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + // The name of the request header that will be used with affinity. + // The name of the request header that will be used + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` } - type HeaderObservation struct { - -// The name of the request header that will be used with affinity. -// The name of the request header that will be used -HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + // The name of the request header that will be used with affinity. + // The name of the request header that will be used + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` } - type HeaderParameters struct { - -// The name of the request header that will be used with affinity. 
-// The name of the request header that will be used -// +kubebuilder:validation:Optional -HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + // The name of the request header that will be used with affinity. + // The name of the request header that will be used + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` } - type HealthcheckGRPCHealthcheckInitParameters struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type HealthcheckGRPCHealthcheckObservation struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type HealthcheckGRPCHealthcheckParameters struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -// +kubebuilder:validation:Optional -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type HealthcheckHTTPHealthcheckInitParameters struct { + // "Host" HTTP header value. + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -Host *string `json:"host,omitempty" tf:"host,omitempty"` + // If set, health checks will use HTTP2. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// If set, health checks will use HTTP2. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` - -// HTTP path. -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // HTTP path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` } - type HealthcheckHTTPHealthcheckObservation struct { + // "Host" HTTP header value. + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -Host *string `json:"host,omitempty" tf:"host,omitempty"` + // If set, health checks will use HTTP2. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// If set, health checks will use HTTP2. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` - -// HTTP path. -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // HTTP path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` } - type HealthcheckHTTPHealthcheckParameters struct { + // "Host" HTTP header value. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -// +kubebuilder:validation:Optional -Host *string `json:"host,omitempty" tf:"host,omitempty"` - -// If set, health checks will use HTTP2. -// +kubebuilder:validation:Optional -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // If set, health checks will use HTTP2. + // +kubebuilder:validation:Optional + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// HTTP path. -// +kubebuilder:validation:Optional -Path *string `json:"path" tf:"path,omitempty"` + // HTTP path. 
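// A minimal editorial sketch (not part of the generated file) of the gRPC
// health check documented above. ServiceName feeds the service field of
// grpc.health.v1.HealthCheckRequest; in the standard gRPC health protocol
// an empty service name asks about the server as a whole. Assumes ptr is
// k8s.io/utils/ptr; the service name is illustrative.
func exampleGRPCHealthcheck() HealthcheckGRPCHealthcheckParameters {
	return HealthcheckGRPCHealthcheckParameters{
		ServiceName: ptr.To("my.pkg.MyService"),
	}
}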
+ // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` } - type HealthcheckInitParameters struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + GRPCHealthcheck []GRPCHealthcheckInitParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -GRPCHealthcheck []GRPCHealthcheckInitParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` - -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -HTTPHealthcheck []HTTPHealthcheckInitParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + HTTPHealthcheck []HTTPHealthcheckInitParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Optional alternative port for health checking. -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Optional alternative port for health checking. + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Interval between health checks. -Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + // Interval between health checks. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -StreamHealthcheck []StreamHealthcheckInitParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + StreamHealthcheck []StreamHealthcheckInitParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Time to wait for a health check response. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Time to wait for a health check response. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. 
Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HealthcheckObservation struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + GRPCHealthcheck []GRPCHealthcheckObservation `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -GRPCHealthcheck []GRPCHealthcheckObservation `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + HTTPHealthcheck []HTTPHealthcheckObservation `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -HTTPHealthcheck []HTTPHealthcheckObservation `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Optional alternative port for health checking. + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Optional alternative port for health checking. -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Interval between health checks. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` -// Interval between health checks. -Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + StreamHealthcheck []StreamHealthcheckObservation `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. 
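// A worked editorial example of the jitter formula quoted above: with an
// interval of 2000 ms and interval_jitter_percent = 20, up to
// 2000 * 20 / 100 = 400 ms is added to each wait between checks.
func jitterMillis(intervalMS, jitterPercent float64) float64 {
	return intervalMS * jitterPercent / 100 // jitterMillis(2000, 20) == 400
}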
-StreamHealthcheck []StreamHealthcheckObservation `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Time to wait for a health check response. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Time to wait for a health check response. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` - -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HealthcheckParameters struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + GRPCHealthcheck []GRPCHealthcheckParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -GRPCHealthcheck []GRPCHealthcheckParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` - -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -HTTPHealthcheck []HTTPHealthcheckParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + HTTPHealthcheck []HTTPHealthcheckParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Optional alternative port for health checking. -// +kubebuilder:validation:Optional -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Optional alternative port for health checking. + // +kubebuilder:validation:Optional + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -// +kubebuilder:validation:Optional -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Interval between health checks. -// +kubebuilder:validation:Optional -Interval *string `json:"interval" tf:"interval,omitempty"` + // Interval between health checks. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. 
-// +kubebuilder:validation:Optional -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + // +kubebuilder:validation:Optional + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -StreamHealthcheck []StreamHealthcheckParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + StreamHealthcheck []StreamHealthcheckParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Time to wait for a health check response. -// +kubebuilder:validation:Optional -Timeout *string `json:"timeout" tf:"timeout,omitempty"` + // Time to wait for a health check response. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout" tf:"timeout,omitempty"` -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -// +kubebuilder:validation:Optional -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HealthcheckStreamHealthcheckInitParameters struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type HealthcheckStreamHealthcheckObservation struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. 
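// A minimal editorial sketch (not part of the generated file) combining the
// fields documented above into one HTTP health check. interval and timeout
// are required; a threshold of 0 behaves like 1, as noted above. Assumes
// ptr is k8s.io/utils/ptr; the duration strings and path are illustrative.
func exampleHealthcheck() HealthcheckParameters {
	return HealthcheckParameters{
		Interval:           ptr.To("2s"),
		Timeout:            ptr.To("1s"),
		HealthyThreshold:   ptr.To(3.0),
		UnhealthyThreshold: ptr.To(2.0),
		HTTPHealthcheck:    []HTTPHealthcheckParameters{{Path: ptr.To("/healthz")}},
	}
}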
-Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type HealthcheckStreamHealthcheckParameters struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + // +kubebuilder:validation:Optional + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -// +kubebuilder:validation:Optional -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -// +kubebuilder:validation:Optional -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + // +kubebuilder:validation:Optional + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type LoadBalancingConfigInitParameters struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` - -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. 
+ StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type LoadBalancingConfigObservation struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` - -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type LoadBalancingConfigParameters struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + // +kubebuilder:validation:Optional + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -// +kubebuilder:validation:Optional -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. 
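// A minimal editorial sketch (not part of the generated file) of the
// load_balancing_config knobs documented above: LEAST_REQUEST mode, panic
// mode once fewer than 50% of hosts are healthy, and 90% of traffic kept
// in the caller's availability zone. Assumes ptr is k8s.io/utils/ptr.
func exampleLoadBalancingConfig() LoadBalancingConfigParameters {
	return LoadBalancingConfigParameters{
		Mode:                        ptr.To("LEAST_REQUEST"),
		PanicThreshold:              ptr.To(50.0),
		LocalityAwareRoutingPercent: ptr.To(90.0),
		StrictLocality:              ptr.To(false),
	}
}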
+ // +kubebuilder:validation:Optional + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -// +kubebuilder:validation:Optional -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` - -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -// +kubebuilder:validation:Optional -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + // +kubebuilder:validation:Optional + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type SessionAffinityInitParameters struct { + // Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. + // IP address affinity + Connection []ConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. -// IP address affinity -Connection []ConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` - -// Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. -// Cookie affinity -Cookie []CookieInitParameters `json:"cookie,omitempty" tf:"cookie,omitempty"` + // Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + // Cookie affinity + Cookie []CookieInitParameters `json:"cookie,omitempty" tf:"cookie,omitempty"` -// Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. -// Request header affinity -Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + // Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + // Request header affinity + Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` } - type SessionAffinityObservation struct { + // Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. + // IP address affinity + Connection []ConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` -// Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. 
-// IP address affinity -Connection []ConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` - -// Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. -// Cookie affinity -Cookie []CookieObservation `json:"cookie,omitempty" tf:"cookie,omitempty"` + // Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + // Cookie affinity + Cookie []CookieObservation `json:"cookie,omitempty" tf:"cookie,omitempty"` -// Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. -// Request header affinity -Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + // Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + // Request header affinity + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` } - type SessionAffinityParameters struct { + // Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. + // IP address affinity + // +kubebuilder:validation:Optional + Connection []ConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. -// IP address affinity -// +kubebuilder:validation:Optional -Connection []ConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + // Cookie affinity + // +kubebuilder:validation:Optional + Cookie []CookieParameters `json:"cookie,omitempty" tf:"cookie,omitempty"` -// Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. -// Cookie affinity -// +kubebuilder:validation:Optional -Cookie []CookieParameters `json:"cookie,omitempty" tf:"cookie,omitempty"` - -// Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. -// Request header affinity -// +kubebuilder:validation:Optional -Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + // Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + // Request header affinity + // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` } - type StreamBackendHealthcheckGRPCHealthcheckInitParameters struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. 
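// A minimal editorial sketch (not part of the generated file) of
// header-based session affinity as documented above; connection, cookie
// and header are alternative strategies, so typically only one is set.
// Assumes ptr is k8s.io/utils/ptr; "Authorization" is an illustrative
// header name.
func exampleSessionAffinity() SessionAffinityParameters {
	return SessionAffinityParameters{
		Header: []HeaderParameters{{HeaderName: ptr.To("Authorization")}},
	}
}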
-ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type StreamBackendHealthcheckGRPCHealthcheckObservation struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type StreamBackendHealthcheckGRPCHealthcheckParameters struct { - -// Service name for grpc.health.v1.HealthCheckRequest message. -// +kubebuilder:validation:Optional -ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + // Service name for grpc.health.v1.HealthCheckRequest message. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` } - type StreamBackendHealthcheckHTTPHealthcheckInitParameters struct { + // "Host" HTTP header value. + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -Host *string `json:"host,omitempty" tf:"host,omitempty"` + // If set, health checks will use HTTP2. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// If set, health checks will use HTTP2. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` - -// HTTP path. -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // HTTP path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` } - type StreamBackendHealthcheckHTTPHealthcheckObservation struct { + // "Host" HTTP header value. + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -Host *string `json:"host,omitempty" tf:"host,omitempty"` - -// If set, health checks will use HTTP2. -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // If set, health checks will use HTTP2. + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// HTTP path. -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // HTTP path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` } - type StreamBackendHealthcheckHTTPHealthcheckParameters struct { + // "Host" HTTP header value. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` -// "Host" HTTP header value. -// +kubebuilder:validation:Optional -Host *string `json:"host,omitempty" tf:"host,omitempty"` - -// If set, health checks will use HTTP2. -// +kubebuilder:validation:Optional -Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` + // If set, health checks will use HTTP2. + // +kubebuilder:validation:Optional + Http2 *bool `json:"http2,omitempty" tf:"http2,omitempty"` -// HTTP path. -// +kubebuilder:validation:Optional -Path *string `json:"path" tf:"path,omitempty"` + // HTTP path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` } - type StreamBackendHealthcheckInitParameters struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + GRPCHealthcheck []StreamBackendHealthcheckGRPCHealthcheckInitParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. 
-GRPCHealthcheck []StreamBackendHealthcheckGRPCHealthcheckInitParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + HTTPHealthcheck []StreamBackendHealthcheckHTTPHealthcheckInitParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -HTTPHealthcheck []StreamBackendHealthcheckHTTPHealthcheckInitParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Optional alternative port for health checking. + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Optional alternative port for health checking. -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Interval between health checks. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` -// Interval between health checks. -Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + StreamHealthcheck []StreamBackendHealthcheckStreamHealthcheckInitParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -StreamHealthcheck []StreamBackendHealthcheckStreamHealthcheckInitParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Time to wait for a health check response. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Time to wait for a health check response. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` - -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. 
Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type StreamBackendHealthcheckObservation struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + GRPCHealthcheck []StreamBackendHealthcheckGRPCHealthcheckObservation `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -GRPCHealthcheck []StreamBackendHealthcheckGRPCHealthcheckObservation `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` - -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -HTTPHealthcheck []StreamBackendHealthcheckHTTPHealthcheckObservation `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + HTTPHealthcheck []StreamBackendHealthcheckHTTPHealthcheckObservation `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Optional alternative port for health checking. -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Optional alternative port for health checking. + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Interval between health checks. -Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + // Interval between health checks. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -StreamHealthcheck []StreamBackendHealthcheckStreamHealthcheckObservation `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + StreamHealthcheck []StreamBackendHealthcheckStreamHealthcheckObservation `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Time to wait for a health check response. 
-Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Time to wait for a health check response. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type StreamBackendHealthcheckParameters struct { + // Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + GRPCHealthcheck []StreamBackendHealthcheckGRPCHealthcheckParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` -// Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -GRPCHealthcheck []StreamBackendHealthcheckGRPCHealthcheckParameters `json:"grpcHealthcheck,omitempty" tf:"grpc_healthcheck,omitempty"` - -// Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -HTTPHealthcheck []StreamBackendHealthcheckHTTPHealthcheckParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` + // Http Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + HTTPHealthcheck []StreamBackendHealthcheckHTTPHealthcheckParameters `json:"httpHealthcheck,omitempty" tf:"http_healthcheck,omitempty"` -// Optional alternative port for health checking. -// +kubebuilder:validation:Optional -HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` + // Optional alternative port for health checking. + // +kubebuilder:validation:Optional + HealthcheckPort *float64 `json:"healthcheckPort,omitempty" tf:"healthcheck_port,omitempty"` -// Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. -// +kubebuilder:validation:Optional -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Interval between health checks. -// +kubebuilder:validation:Optional -Interval *string `json:"interval" tf:"interval,omitempty"` + // Interval between health checks. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` -// An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. 
-// +kubebuilder:validation:Optional -IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` + // An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time. + // +kubebuilder:validation:Optional + IntervalJitterPercent *float64 `json:"intervalJitterPercent,omitempty" tf:"interval_jitter_percent,omitempty"` -// Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. -// +kubebuilder:validation:Optional -StreamHealthcheck []StreamBackendHealthcheckStreamHealthcheckParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` + // Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below. + // +kubebuilder:validation:Optional + StreamHealthcheck []StreamBackendHealthcheckStreamHealthcheckParameters `json:"streamHealthcheck,omitempty" tf:"stream_healthcheck,omitempty"` -// Time to wait for a health check response. -// +kubebuilder:validation:Optional -Timeout *string `json:"timeout" tf:"timeout,omitempty"` + // Time to wait for a health check response. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout" tf:"timeout,omitempty"` -// Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. -// +kubebuilder:validation:Optional -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type StreamBackendHealthcheckStreamHealthcheckInitParameters struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type StreamBackendHealthcheckStreamHealthcheckObservation struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. 
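// A minimal editorial sketch (not part of the generated file) of the
// send/receive pair documented above: the balancer writes Send over the
// TCP connection and requires Receive to appear in the reply; both
// payloads here are illustrative. Assumes ptr is k8s.io/utils/ptr.
func exampleStreamProbe() StreamBackendHealthcheckStreamHealthcheckInitParameters {
	return StreamBackendHealthcheckStreamHealthcheckInitParameters{
		Send:    ptr.To("PING\n"),
		Receive: ptr.To("PONG"),
	}
}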
-Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type StreamBackendHealthcheckStreamHealthcheckParameters struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + // +kubebuilder:validation:Optional + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -// +kubebuilder:validation:Optional -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -// +kubebuilder:validation:Optional -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + // +kubebuilder:validation:Optional + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type StreamBackendInitParameters struct { + EnableProxyProtocol *bool `json:"enableProxyProtocol,omitempty" tf:"enable_proxy_protocol,omitempty"` + // Healthcheck specification that will be used by this backend. Structure is documented below. + Healthcheck []StreamBackendHealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -EnableProxyProtocol *bool `json:"enableProxyProtocol,omitempty" tf:"enable_proxy_protocol,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + LoadBalancingConfig []StreamBackendLoadBalancingConfigInitParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Healthcheck specification that will be used by this backend. Structure is documented below. -Healthcheck []StreamBackendHealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` + // Name of the backend. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -LoadBalancingConfig []StreamBackendLoadBalancingConfigInitParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Name of the backend. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + TLS []StreamBackendTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // References target groups for the backend. + // +crossplane:generate:reference:type=TargetGroup + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. 
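// A minimal editorial sketch (not part of the generated file) of a stream
// (TCP) backend built from the fields documented above. Assumes ptr is
// k8s.io/utils/ptr; the name, port and target group ID are illustrative
// placeholders.
func exampleStreamBackend() StreamBackendInitParameters {
	return StreamBackendInitParameters{
		Name:                ptr.To("tcp-backend"),
		Port:                ptr.To(5432.0),
		EnableProxyProtocol: ptr.To(false),
		TargetGroupIds:      []*string{ptr.To("example-target-group-id")},
	}
}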
-TLS []StreamBackendTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // References to TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` -// References target groups for the backend. -// +crossplane:generate:reference:type=TargetGroup -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // Selector for a list of TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` -// References to TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` - -// Selector for a list of TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` - -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type StreamBackendLoadBalancingConfigInitParameters struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` - -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type StreamBackendLoadBalancingConfigObservation struct { + // Percent of traffic to be sent to the same availability zone. 
The rest will be equally divided between other zones. + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` - -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type StreamBackendLoadBalancingConfigParameters struct { + // Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. + // +kubebuilder:validation:Optional + LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` -// Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones. -// +kubebuilder:validation:Optional -LocalityAwareRoutingPercent *float64 `json:"localityAwareRoutingPercent,omitempty" tf:"locality_aware_routing_percent,omitempty"` - -// Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Load balancing mode for the backend. Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", "MAGLEV_HASH". + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. -// +kubebuilder:validation:Optional -PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` + // If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. 
This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold. + // +kubebuilder:validation:Optional + PanicThreshold *float64 `json:"panicThreshold,omitempty" tf:"panic_threshold,omitempty"` -// If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. -// +kubebuilder:validation:Optional -StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` + // If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones. + // +kubebuilder:validation:Optional + StrictLocality *bool `json:"strictLocality,omitempty" tf:"strict_locality,omitempty"` } - type StreamBackendObservation struct { + EnableProxyProtocol *bool `json:"enableProxyProtocol,omitempty" tf:"enable_proxy_protocol,omitempty"` + // Healthcheck specification that will be used by this backend. Structure is documented below. + Healthcheck []StreamBackendHealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -EnableProxyProtocol *bool `json:"enableProxyProtocol,omitempty" tf:"enable_proxy_protocol,omitempty"` - -// Healthcheck specification that will be used by this backend. Structure is documented below. -Healthcheck []StreamBackendHealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -LoadBalancingConfig []StreamBackendLoadBalancingConfigObservation `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + LoadBalancingConfig []StreamBackendLoadBalancingConfigObservation `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Name of the backend. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the backend. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -TLS []StreamBackendTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + TLS []StreamBackendTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` -// References target groups for the backend. -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References target groups for the backend. + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. 
+ Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type StreamBackendParameters struct { + // +kubebuilder:validation:Optional + EnableProxyProtocol *bool `json:"enableProxyProtocol,omitempty" tf:"enable_proxy_protocol,omitempty"` -// +kubebuilder:validation:Optional -EnableProxyProtocol *bool `json:"enableProxyProtocol,omitempty" tf:"enable_proxy_protocol,omitempty"` - -// Healthcheck specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -Healthcheck []StreamBackendHealthcheckParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` + // Healthcheck specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + Healthcheck []StreamBackendHealthcheckParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// Load Balancing Config specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -LoadBalancingConfig []StreamBackendLoadBalancingConfigParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` + // Load Balancing Config specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + LoadBalancingConfig []StreamBackendLoadBalancingConfigParameters `json:"loadBalancingConfig,omitempty" tf:"load_balancing_config,omitempty"` -// Name of the backend. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Name of the backend. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Port for incoming traffic. -// +kubebuilder:validation:Optional -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port for incoming traffic. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Tls specification that will be used by this backend. Structure is documented below. -// +kubebuilder:validation:Optional -TLS []StreamBackendTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // Tls specification that will be used by this backend. Structure is documented below. + // +kubebuilder:validation:Optional + TLS []StreamBackendTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` -// References target groups for the backend. -// +crossplane:generate:reference:type=TargetGroup -// +kubebuilder:validation:Optional -TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` + // References target groups for the backend. + // +crossplane:generate:reference:type=TargetGroup + // +kubebuilder:validation:Optional + TargetGroupIds []*string `json:"targetGroupIds,omitempty" tf:"target_group_ids,omitempty"` -// References to TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` + // References to TargetGroup to populate targetGroupIds. + // +kubebuilder:validation:Optional + TargetGroupIdsRefs []v1.Reference `json:"targetGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of TargetGroup to populate targetGroupIds. -// +kubebuilder:validation:Optional -TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of TargetGroup to populate targetGroupIds. 
+ // +kubebuilder:validation:Optional + TargetGroupIdsSelector *v1.Selector `json:"targetGroupIdsSelector,omitempty" tf:"-"` -// Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. -// +kubebuilder:validation:Optional -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type StreamBackendTLSInitParameters struct { + // SNI string for TLS connections. + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -ValidationContext []StreamBackendTLSValidationContextInitParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + ValidationContext []StreamBackendTLSValidationContextInitParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type StreamBackendTLSObservation struct { + // SNI string for TLS connections. + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -ValidationContext []StreamBackendTLSValidationContextObservation `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + ValidationContext []StreamBackendTLSValidationContextObservation `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type StreamBackendTLSParameters struct { + // SNI string for TLS connections. + // +kubebuilder:validation:Optional + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -// +kubebuilder:validation:Optional -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -// +kubebuilder:validation:Optional -ValidationContext []StreamBackendTLSValidationContextParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + // +kubebuilder:validation:Optional + ValidationContext []StreamBackendTLSValidationContextParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type StreamBackendTLSValidationContextInitParameters struct { + // PEM-encoded trusted CA certificate chain. + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type StreamBackendTLSValidationContextObservation struct { + // PEM-encoded trusted CA certificate chain. + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. 
+ TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type StreamBackendTLSValidationContextParameters struct { + // PEM-encoded trusted CA certificate chain. + // +kubebuilder:validation:Optional + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -// +kubebuilder:validation:Optional -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -// +kubebuilder:validation:Optional -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + // +kubebuilder:validation:Optional + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type StreamHealthcheckInitParameters struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type StreamHealthcheckObservation struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type StreamHealthcheckParameters struct { + // Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. + // +kubebuilder:validation:Optional + Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` -// Data that must be contained in the messages received from targets for a successful health check. If not specified, no messages are expected from targets, and those that are received are not checked. -// +kubebuilder:validation:Optional -Receive *string `json:"receive,omitempty" tf:"receive,omitempty"` - -// Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. 
-// +kubebuilder:validation:Optional -Send *string `json:"send,omitempty" tf:"send,omitempty"` + // Message sent to targets during TCP data transfer. If not specified, no data is sent to the target. + // +kubebuilder:validation:Optional + Send *string `json:"send,omitempty" tf:"send,omitempty"` } - type TLSInitParameters struct { + // SNI string for TLS connections. + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -ValidationContext []ValidationContextInitParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + ValidationContext []ValidationContextInitParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type TLSObservation struct { + // SNI string for TLS connections. + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -ValidationContext []ValidationContextObservation `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + ValidationContext []ValidationContextObservation `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type TLSParameters struct { + // SNI string for TLS connections. + // +kubebuilder:validation:Optional + Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` -// SNI string for TLS connections. -// +kubebuilder:validation:Optional -Sni *string `json:"sni,omitempty" tf:"sni,omitempty"` - -// +kubebuilder:validation:Optional -ValidationContext []ValidationContextParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` + // +kubebuilder:validation:Optional + ValidationContext []ValidationContextParameters `json:"validationContext,omitempty" tf:"validation_context,omitempty"` } - type TLSValidationContextInitParameters struct { + // PEM-encoded trusted CA certificate chain. + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type TLSValidationContextObservation struct { + // PEM-encoded trusted CA certificate chain. + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type TLSValidationContextParameters struct { + // PEM-encoded trusted CA certificate chain. + // +kubebuilder:validation:Optional + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -// +kubebuilder:validation:Optional -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. 
-// +kubebuilder:validation:Optional -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + // +kubebuilder:validation:Optional + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type ValidationContextInitParameters struct { + // PEM-encoded trusted CA certificate chain. + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type ValidationContextObservation struct { + // PEM-encoded trusted CA certificate chain. + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } - type ValidationContextParameters struct { + // PEM-encoded trusted CA certificate chain. + // +kubebuilder:validation:Optional + TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` -// PEM-encoded trusted CA certificate chain. -// +kubebuilder:validation:Optional -TrustedCABytes *string `json:"trustedCaBytes,omitempty" tf:"trusted_ca_bytes,omitempty"` - -// Trusted CA certificate ID in the Certificate Manager. -// +kubebuilder:validation:Optional -TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` + // Trusted CA certificate ID in the Certificate Manager. + // +kubebuilder:validation:Optional + TrustedCAID *string `json:"trustedCaId,omitempty" tf:"trusted_ca_id,omitempty"` } // BackendGroupSpec defines the desired state of BackendGroup type BackendGroupSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider BackendGroupParameters `json:"forProvider"` + ForProvider BackendGroupParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -1630,20 +1449,19 @@ type BackendGroupSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider BackendGroupInitParameters `json:"initProvider,omitempty"` + InitProvider BackendGroupInitParameters `json:"initProvider,omitempty"` } // BackendGroupStatus defines the observed state of BackendGroup. type BackendGroupStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider BackendGroupObservation `json:"atProvider,omitempty"` + AtProvider BackendGroupObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // BackendGroup is the Schema for the BackendGroups API. 
An application load balancer distributes the load across cloud resources that are combined into a backend group. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/alb/v1alpha1/zz_generated.conversion_hubs.go b/apis/alb/v1alpha1/zz_generated.conversion_hubs.go index 5175fda..374e971 100755 --- a/apis/alb/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/alb/v1alpha1/zz_generated.conversion_hubs.go @@ -1,22 +1,18 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 +// Hub marks this type as a conversion hub. +func (tr *BackendGroup) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *BackendGroup) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *HTTPRouter) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *LoadBalancer) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *HTTPRouter) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *TargetGroup) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *LoadBalancer) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *VirtualHost) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *TargetGroup) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *VirtualHost) Hub() {} diff --git a/apis/alb/v1alpha1/zz_generated.deepcopy.go b/apis/alb/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..5304567 --- /dev/null +++ b/apis/alb/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,10192 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressInitParameters) DeepCopyInto(out *AddressInitParameters) { + *out = *in + if in.ExternalIPv4Address != nil { + in, out := &in.ExternalIPv4Address, &out.ExternalIPv4Address + *out = make([]ExternalIPv4AddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExternalIPv6Address != nil { + in, out := &in.ExternalIPv6Address, &out.ExternalIPv6Address + *out = make([]ExternalIPv6AddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InternalIPv4Address != nil { + in, out := &in.InternalIPv4Address, &out.InternalIPv4Address + *out = make([]InternalIPv4AddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressInitParameters. +func (in *AddressInitParameters) DeepCopy() *AddressInitParameters { + if in == nil { + return nil + } + out := new(AddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
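The deep-copy functions in this new file all follow the same controller-gen template: a shallow `*out = *in` first, then every pointer, slice, and map field is re-allocated so the copy shares no memory with the receiver. A minimal hand-written equivalent for a hypothetical two-field type (not part of the generated file) makes the template easier to read:

// Illustrative sketch only; tinySpec is a made-up type. The generated code
// applies exactly this shape to every *Parameters, *InitParameters and
// *Observation struct in the file.
type tinySpec struct {
	Name  *string
	Ports []*float64
}

func (in *tinySpec) DeepCopyInto(out *tinySpec) {
	*out = *in // shallow copy: pointer fields still alias the receiver
	if in.Name != nil {
		// The in/out shadowing narrows both sides to a single field,
		// which is what keeps every generated block identical in shape.
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]*float64, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(float64)
				**out = **in
			}
		}
	}
}

After DeepCopyInto returns, no mutation of the copy can be observed through the original, which is the property the Kubernetes machinery relies on.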
+func (in *AddressObservation) DeepCopyInto(out *AddressObservation) { + *out = *in + if in.ExternalIPv4Address != nil { + in, out := &in.ExternalIPv4Address, &out.ExternalIPv4Address + *out = make([]ExternalIPv4AddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExternalIPv6Address != nil { + in, out := &in.ExternalIPv6Address, &out.ExternalIPv6Address + *out = make([]ExternalIPv6AddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InternalIPv4Address != nil { + in, out := &in.InternalIPv4Address, &out.InternalIPv4Address + *out = make([]InternalIPv4AddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressObservation. +func (in *AddressObservation) DeepCopy() *AddressObservation { + if in == nil { + return nil + } + out := new(AddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressParameters) DeepCopyInto(out *AddressParameters) { + *out = *in + if in.ExternalIPv4Address != nil { + in, out := &in.ExternalIPv4Address, &out.ExternalIPv4Address + *out = make([]ExternalIPv4AddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExternalIPv6Address != nil { + in, out := &in.ExternalIPv6Address, &out.ExternalIPv6Address + *out = make([]ExternalIPv6AddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InternalIPv4Address != nil { + in, out := &in.InternalIPv4Address, &out.InternalIPv4Address + *out = make([]InternalIPv4AddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressParameters. +func (in *AddressParameters) DeepCopy() *AddressParameters { + if in == nil { + return nil + } + out := new(AddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyInitParameters) DeepCopyInto(out *AllocationPolicyInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyInitParameters. +func (in *AllocationPolicyInitParameters) DeepCopy() *AllocationPolicyInitParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyObservation) DeepCopyInto(out *AllocationPolicyObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyObservation. 
+func (in *AllocationPolicyObservation) DeepCopy() *AllocationPolicyObservation { + if in == nil { + return nil + } + out := new(AllocationPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyParameters) DeepCopyInto(out *AllocationPolicyParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyParameters. +func (in *AllocationPolicyParameters) DeepCopy() *AllocationPolicyParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsHeaderInitParameters) DeepCopyInto(out *AndPrincipalsHeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]ValueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsHeaderInitParameters. +func (in *AndPrincipalsHeaderInitParameters) DeepCopy() *AndPrincipalsHeaderInitParameters { + if in == nil { + return nil + } + out := new(AndPrincipalsHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsHeaderObservation) DeepCopyInto(out *AndPrincipalsHeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]ValueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsHeaderObservation. +func (in *AndPrincipalsHeaderObservation) DeepCopy() *AndPrincipalsHeaderObservation { + if in == nil { + return nil + } + out := new(AndPrincipalsHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsHeaderParameters) DeepCopyInto(out *AndPrincipalsHeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]ValueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsHeaderParameters. +func (in *AndPrincipalsHeaderParameters) DeepCopy() *AndPrincipalsHeaderParameters { + if in == nil { + return nil + } + out := new(AndPrincipalsHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndPrincipalsHeaderValueInitParameters) DeepCopyInto(out *AndPrincipalsHeaderValueInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsHeaderValueInitParameters. +func (in *AndPrincipalsHeaderValueInitParameters) DeepCopy() *AndPrincipalsHeaderValueInitParameters { + if in == nil { + return nil + } + out := new(AndPrincipalsHeaderValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsHeaderValueObservation) DeepCopyInto(out *AndPrincipalsHeaderValueObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsHeaderValueObservation. +func (in *AndPrincipalsHeaderValueObservation) DeepCopy() *AndPrincipalsHeaderValueObservation { + if in == nil { + return nil + } + out := new(AndPrincipalsHeaderValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsHeaderValueParameters) DeepCopyInto(out *AndPrincipalsHeaderValueParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsHeaderValueParameters. +func (in *AndPrincipalsHeaderValueParameters) DeepCopy() *AndPrincipalsHeaderValueParameters { + if in == nil { + return nil + } + out := new(AndPrincipalsHeaderValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsInitParameters) DeepCopyInto(out *AndPrincipalsInitParameters) { + *out = *in + if in.Any != nil { + in, out := &in.Any, &out.Any + *out = new(bool) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]AndPrincipalsHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteIP != nil { + in, out := &in.RemoteIP, &out.RemoteIP + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsInitParameters. 
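The AndPrincipalsHeaderValue types copied above expose the three header-match modes as independent optional pointers: Exact, Prefix, and Regex. A hedged sketch of populating them (in-package for brevity; the convention that only one mode is set at a time mirrors the usual Terraform schema and is not enforced by this file):

// strPtr is a local helper for this sketch.
func strPtr(s string) *string { return &s }

// Match a header value exactly; Prefix and Regex stay nil.
var exactMatch = AndPrincipalsHeaderValueInitParameters{
	Exact: strPtr("production"), // placeholder value
}

// A principal matched by source network instead of by header.
var ipPrincipal = AndPrincipalsInitParameters{
	RemoteIP: strPtr("10.0.0.0/8"), // placeholder CIDR
}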
+func (in *AndPrincipalsInitParameters) DeepCopy() *AndPrincipalsInitParameters { + if in == nil { + return nil + } + out := new(AndPrincipalsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsObservation) DeepCopyInto(out *AndPrincipalsObservation) { + *out = *in + if in.Any != nil { + in, out := &in.Any, &out.Any + *out = new(bool) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]AndPrincipalsHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteIP != nil { + in, out := &in.RemoteIP, &out.RemoteIP + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsObservation. +func (in *AndPrincipalsObservation) DeepCopy() *AndPrincipalsObservation { + if in == nil { + return nil + } + out := new(AndPrincipalsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndPrincipalsParameters) DeepCopyInto(out *AndPrincipalsParameters) { + *out = *in + if in.Any != nil { + in, out := &in.Any, &out.Any + *out = new(bool) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]AndPrincipalsHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteIP != nil { + in, out := &in.RemoteIP, &out.RemoteIP + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndPrincipalsParameters. +func (in *AndPrincipalsParameters) DeepCopy() *AndPrincipalsParameters { + if in == nil { + return nil + } + out := new(AndPrincipalsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendGroup) DeepCopyInto(out *BackendGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroup. +func (in *BackendGroup) DeepCopy() *BackendGroup { + if in == nil { + return nil + } + out := new(BackendGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
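BackendGroup's DeepCopyObject above is what satisfies k8s.io/apimachinery's runtime.Object, and DeepCopy is the method controller authors actually reach for: objects handed out by an informer cache are shared, so they must be copied before any mutation. A sketch of the usual pattern (the label key is a placeholder):

// Never mutate what the cache returned; work on a private copy.
func withTouchedLabel(cached *BackendGroup) *BackendGroup {
	bg := cached.DeepCopy() // shares no memory with the cached object
	if bg.Labels == nil {
		bg.Labels = map[string]string{}
	}
	bg.Labels["example.org/touched"] = "true" // hypothetical label key
	return bg // safe to hand to an update call and to keep mutating
}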
+func (in *BackendGroupInitParameters) DeepCopyInto(out *BackendGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GRPCBackend != nil { + in, out := &in.GRPCBackend, &out.GRPCBackend + *out = make([]GRPCBackendInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPBackend != nil { + in, out := &in.HTTPBackend, &out.HTTPBackend + *out = make([]HTTPBackendInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SessionAffinity != nil { + in, out := &in.SessionAffinity, &out.SessionAffinity + *out = make([]SessionAffinityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamBackend != nil { + in, out := &in.StreamBackend, &out.StreamBackend + *out = make([]StreamBackendInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroupInitParameters. +func (in *BackendGroupInitParameters) DeepCopy() *BackendGroupInitParameters { + if in == nil { + return nil + } + out := new(BackendGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendGroupList) DeepCopyInto(out *BackendGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackendGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroupList. +func (in *BackendGroupList) DeepCopy() *BackendGroupList { + if in == nil { + return nil + } + out := new(BackendGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
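BackendGroupInitParameters above also carries the Crossplane reference plumbing (FolderIDRef/FolderIDSelector here, TargetGroupIdsRefs/TargetGroupIdsSelector on the stream backend), which is why the generator pulls in crossplane-runtime's v1.Reference and v1.Selector. A hedged sketch of the two resolution styles, assuming the usual crossplane-runtime field names, with v1 being the import this file already uses:

// By name: resolve folderId from one named referenced object.
var byRef = BackendGroupInitParameters{
	FolderIDRef: &v1.Reference{Name: "my-folder"}, // placeholder name
}

// By label: let Crossplane pick a referenced object matching the selector.
var bySelector = BackendGroupInitParameters{
	FolderIDSelector: &v1.Selector{
		MatchLabels: map[string]string{"team": "platform"}, // placeholder labels
	},
}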
+func (in *BackendGroupObservation) DeepCopyInto(out *BackendGroupObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.GRPCBackend != nil { + in, out := &in.GRPCBackend, &out.GRPCBackend + *out = make([]GRPCBackendObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPBackend != nil { + in, out := &in.HTTPBackend, &out.HTTPBackend + *out = make([]HTTPBackendObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SessionAffinity != nil { + in, out := &in.SessionAffinity, &out.SessionAffinity + *out = make([]SessionAffinityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamBackend != nil { + in, out := &in.StreamBackend, &out.StreamBackend + *out = make([]StreamBackendObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroupObservation. +func (in *BackendGroupObservation) DeepCopy() *BackendGroupObservation { + if in == nil { + return nil + } + out := new(BackendGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendGroupParameters) DeepCopyInto(out *BackendGroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GRPCBackend != nil { + in, out := &in.GRPCBackend, &out.GRPCBackend + *out = make([]GRPCBackendParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPBackend != nil { + in, out := &in.HTTPBackend, &out.HTTPBackend + *out = make([]HTTPBackendParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SessionAffinity != nil { + in, out := &in.SessionAffinity, &out.SessionAffinity + *out = make([]SessionAffinityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamBackend != nil { + in, out := &in.StreamBackend, &out.StreamBackend + *out = make([]StreamBackendParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroupParameters. +func (in *BackendGroupParameters) DeepCopy() *BackendGroupParameters { + if in == nil { + return nil + } + out := new(BackendGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendGroupSpec) DeepCopyInto(out *BackendGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroupSpec. +func (in *BackendGroupSpec) DeepCopy() *BackendGroupSpec { + if in == nil { + return nil + } + out := new(BackendGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendGroupStatus) DeepCopyInto(out *BackendGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendGroupStatus. +func (in *BackendGroupStatus) DeepCopy() *BackendGroupStatus { + if in == nil { + return nil + } + out := new(BackendGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
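With the spec and status copies in place, the shape of a complete object is easiest to see by populating the generated types directly. A minimal sketch of a TCP backend group built from the stream-backend types earlier in this diff (in-package; all concrete values are placeholders, and only fields visible in the generated schema are set; the Terraform schema may require additional healthcheck fields such as the probe interval):

func ptr[T any](v T) *T { return &v } // local helper for this sketch

var example = BackendGroup{
	Spec: BackendGroupSpec{
		ForProvider: BackendGroupParameters{
			Name: ptr("tcp-backends"),
			StreamBackend: []StreamBackendParameters{{
				Name:   ptr("primary"),
				Port:   ptr(float64(6379)),
				Weight: ptr(float64(100)),
				// Resolve target_group_ids by reference instead of
				// hard-coding cloud IDs.
				TargetGroupIdsRefs: []v1.Reference{{Name: "my-target-group"}},
				Healthcheck: []StreamBackendHealthcheckParameters{{
					Timeout:            ptr("1s"),
					UnhealthyThreshold: ptr(float64(3)),
					StreamHealthcheck: []StreamBackendHealthcheckStreamHealthcheckParameters{{
						Send:    ptr("PING\n"),
						Receive: ptr("PONG"),
					}},
				}},
				LoadBalancingConfig: []StreamBackendLoadBalancingConfigParameters{{
					Mode: ptr("ROUND_ROBIN"),
				}},
			}},
		},
	},
}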
+func (in *ConnectionInitParameters) DeepCopyInto(out *ConnectionInitParameters) { + *out = *in + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionInitParameters. +func (in *ConnectionInitParameters) DeepCopy() *ConnectionInitParameters { + if in == nil { + return nil + } + out := new(ConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionObservation) DeepCopyInto(out *ConnectionObservation) { + *out = *in + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionObservation. +func (in *ConnectionObservation) DeepCopy() *ConnectionObservation { + if in == nil { + return nil + } + out := new(ConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionParameters) DeepCopyInto(out *ConnectionParameters) { + *out = *in + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionParameters. +func (in *ConnectionParameters) DeepCopy() *ConnectionParameters { + if in == nil { + return nil + } + out := new(ConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookieInitParameters) DeepCopyInto(out *CookieInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookieInitParameters. +func (in *CookieInitParameters) DeepCopy() *CookieInitParameters { + if in == nil { + return nil + } + out := new(CookieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookieObservation) DeepCopyInto(out *CookieObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookieObservation. +func (in *CookieObservation) DeepCopy() *CookieObservation { + if in == nil { + return nil + } + out := new(CookieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookieParameters) DeepCopyInto(out *CookieParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookieParameters. +func (in *CookieParameters) DeepCopy() *CookieParameters { + if in == nil { + return nil + } + out := new(CookieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultHandlerInitParameters) DeepCopyInto(out *DefaultHandlerInitParameters) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPHandler != nil { + in, out := &in.HTTPHandler, &out.HTTPHandler + *out = make([]HTTPHandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamHandler != nil { + in, out := &in.StreamHandler, &out.StreamHandler + *out = make([]DefaultHandlerStreamHandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultHandlerInitParameters. +func (in *DefaultHandlerInitParameters) DeepCopy() *DefaultHandlerInitParameters { + if in == nil { + return nil + } + out := new(DefaultHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultHandlerObservation) DeepCopyInto(out *DefaultHandlerObservation) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPHandler != nil { + in, out := &in.HTTPHandler, &out.HTTPHandler + *out = make([]HTTPHandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamHandler != nil { + in, out := &in.StreamHandler, &out.StreamHandler + *out = make([]DefaultHandlerStreamHandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultHandlerObservation. +func (in *DefaultHandlerObservation) DeepCopy() *DefaultHandlerObservation { + if in == nil { + return nil + } + out := new(DefaultHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
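The Connection and Cookie types above are the two session-affinity carriers: source-IP affinity is a single bool, cookie affinity is a name plus a TTL. Sketches of both blocks (in-package; the cookie name is a placeholder, and the duration-string TTL format is an assumption carried over from the other duration fields in this schema):

var on = true
var cookieName, cookieTTL = "alb-session", "30m"

// Pin a client to one backend by its source address.
var bySourceIP = ConnectionInitParameters{SourceIP: &on}

// Have the balancer issue an affinity cookie with a 30-minute lifetime.
var byCookie = CookieInitParameters{Name: &cookieName, TTL: &cookieTTL}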
+func (in *DefaultHandlerParameters) DeepCopyInto(out *DefaultHandlerParameters) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPHandler != nil { + in, out := &in.HTTPHandler, &out.HTTPHandler + *out = make([]HTTPHandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamHandler != nil { + in, out := &in.StreamHandler, &out.StreamHandler + *out = make([]DefaultHandlerStreamHandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultHandlerParameters. +func (in *DefaultHandlerParameters) DeepCopy() *DefaultHandlerParameters { + if in == nil { + return nil + } + out := new(DefaultHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultHandlerStreamHandlerInitParameters) DeepCopyInto(out *DefaultHandlerStreamHandlerInitParameters) { + *out = *in + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultHandlerStreamHandlerInitParameters. +func (in *DefaultHandlerStreamHandlerInitParameters) DeepCopy() *DefaultHandlerStreamHandlerInitParameters { + if in == nil { + return nil + } + out := new(DefaultHandlerStreamHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultHandlerStreamHandlerObservation) DeepCopyInto(out *DefaultHandlerStreamHandlerObservation) { + *out = *in + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultHandlerStreamHandlerObservation. +func (in *DefaultHandlerStreamHandlerObservation) DeepCopy() *DefaultHandlerStreamHandlerObservation { + if in == nil { + return nil + } + out := new(DefaultHandlerStreamHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultHandlerStreamHandlerParameters) DeepCopyInto(out *DefaultHandlerStreamHandlerParameters) { + *out = *in + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultHandlerStreamHandlerParameters. +func (in *DefaultHandlerStreamHandlerParameters) DeepCopy() *DefaultHandlerStreamHandlerParameters { + if in == nil { + return nil + } + out := new(DefaultHandlerStreamHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectResponseActionInitParameters) DeepCopyInto(out *DirectResponseActionInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectResponseActionInitParameters. +func (in *DirectResponseActionInitParameters) DeepCopy() *DirectResponseActionInitParameters { + if in == nil { + return nil + } + out := new(DirectResponseActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectResponseActionObservation) DeepCopyInto(out *DirectResponseActionObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectResponseActionObservation. +func (in *DirectResponseActionObservation) DeepCopy() *DirectResponseActionObservation { + if in == nil { + return nil + } + out := new(DirectResponseActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectResponseActionParameters) DeepCopyInto(out *DirectResponseActionParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectResponseActionParameters. +func (in *DirectResponseActionParameters) DeepCopy() *DirectResponseActionParameters { + if in == nil { + return nil + } + out := new(DirectResponseActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscardRuleInitParameters) DeepCopyInto(out *DiscardRuleInitParameters) { + *out = *in + if in.DiscardPercent != nil { + in, out := &in.DiscardPercent, &out.DiscardPercent + *out = new(float64) + **out = **in + } + if in.GRPCCodes != nil { + in, out := &in.GRPCCodes, &out.GRPCCodes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPCodeIntervals != nil { + in, out := &in.HTTPCodeIntervals, &out.HTTPCodeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPCodes != nil { + in, out := &in.HTTPCodes, &out.HTTPCodes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscardRuleInitParameters. 
+func (in *DiscardRuleInitParameters) DeepCopy() *DiscardRuleInitParameters { + if in == nil { + return nil + } + out := new(DiscardRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscardRuleObservation) DeepCopyInto(out *DiscardRuleObservation) { + *out = *in + if in.DiscardPercent != nil { + in, out := &in.DiscardPercent, &out.DiscardPercent + *out = new(float64) + **out = **in + } + if in.GRPCCodes != nil { + in, out := &in.GRPCCodes, &out.GRPCCodes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPCodeIntervals != nil { + in, out := &in.HTTPCodeIntervals, &out.HTTPCodeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPCodes != nil { + in, out := &in.HTTPCodes, &out.HTTPCodes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscardRuleObservation. +func (in *DiscardRuleObservation) DeepCopy() *DiscardRuleObservation { + if in == nil { + return nil + } + out := new(DiscardRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscardRuleParameters) DeepCopyInto(out *DiscardRuleParameters) { + *out = *in + if in.DiscardPercent != nil { + in, out := &in.DiscardPercent, &out.DiscardPercent + *out = new(float64) + **out = **in + } + if in.GRPCCodes != nil { + in, out := &in.GRPCCodes, &out.GRPCCodes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPCodeIntervals != nil { + in, out := &in.HTTPCodeIntervals, &out.HTTPCodeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPCodes != nil { + in, out := &in.HTTPCodes, &out.HTTPCodes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscardRuleParameters. +func (in *DiscardRuleParameters) DeepCopy() *DiscardRuleParameters { + if in == nil { + return nil + } + out := new(DiscardRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = make([]AddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = make([]AddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = make([]AddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv4AddressInitParameters) DeepCopyInto(out *ExternalIPv4AddressInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv4AddressInitParameters. +func (in *ExternalIPv4AddressInitParameters) DeepCopy() *ExternalIPv4AddressInitParameters { + if in == nil { + return nil + } + out := new(ExternalIPv4AddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalIPv4AddressObservation) DeepCopyInto(out *ExternalIPv4AddressObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv4AddressObservation. +func (in *ExternalIPv4AddressObservation) DeepCopy() *ExternalIPv4AddressObservation { + if in == nil { + return nil + } + out := new(ExternalIPv4AddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv4AddressParameters) DeepCopyInto(out *ExternalIPv4AddressParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv4AddressParameters. +func (in *ExternalIPv4AddressParameters) DeepCopy() *ExternalIPv4AddressParameters { + if in == nil { + return nil + } + out := new(ExternalIPv4AddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv6AddressInitParameters) DeepCopyInto(out *ExternalIPv6AddressInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv6AddressInitParameters. +func (in *ExternalIPv6AddressInitParameters) DeepCopy() *ExternalIPv6AddressInitParameters { + if in == nil { + return nil + } + out := new(ExternalIPv6AddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv6AddressObservation) DeepCopyInto(out *ExternalIPv6AddressObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv6AddressObservation. +func (in *ExternalIPv6AddressObservation) DeepCopy() *ExternalIPv6AddressObservation { + if in == nil { + return nil + } + out := new(ExternalIPv6AddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv6AddressParameters) DeepCopyInto(out *ExternalIPv6AddressParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv6AddressParameters. +func (in *ExternalIPv6AddressParameters) DeepCopy() *ExternalIPv6AddressParameters { + if in == nil { + return nil + } + out := new(ExternalIPv6AddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FqmnInitParameters) DeepCopyInto(out *FqmnInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FqmnInitParameters. +func (in *FqmnInitParameters) DeepCopy() *FqmnInitParameters { + if in == nil { + return nil + } + out := new(FqmnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FqmnObservation) DeepCopyInto(out *FqmnObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FqmnObservation. +func (in *FqmnObservation) DeepCopy() *FqmnObservation { + if in == nil { + return nil + } + out := new(FqmnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FqmnParameters) DeepCopyInto(out *FqmnParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FqmnParameters. +func (in *FqmnParameters) DeepCopy() *FqmnParameters { + if in == nil { + return nil + } + out := new(FqmnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCBackendInitParameters) DeepCopyInto(out *GRPCBackendInitParameters) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]HealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]LoadBalancingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]TLSInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetGroupIdsRefs != nil { + in, out := &in.TargetGroupIdsRefs, &out.TargetGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIdsSelector != nil { + in, out := &in.TargetGroupIdsSelector, &out.TargetGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCBackendInitParameters. +func (in *GRPCBackendInitParameters) DeepCopy() *GRPCBackendInitParameters { + if in == nil { + return nil + } + out := new(GRPCBackendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCBackendObservation) DeepCopyInto(out *GRPCBackendObservation) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]HealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]LoadBalancingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]TLSObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCBackendObservation. 
+func (in *GRPCBackendObservation) DeepCopy() *GRPCBackendObservation { + if in == nil { + return nil + } + out := new(GRPCBackendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCBackendParameters) DeepCopyInto(out *GRPCBackendParameters) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]HealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]LoadBalancingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]TLSParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetGroupIdsRefs != nil { + in, out := &in.TargetGroupIdsRefs, &out.TargetGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIdsSelector != nil { + in, out := &in.TargetGroupIdsSelector, &out.TargetGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCBackendParameters. +func (in *GRPCBackendParameters) DeepCopy() *GRPCBackendParameters { + if in == nil { + return nil + } + out := new(GRPCBackendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCHealthcheckInitParameters) DeepCopyInto(out *GRPCHealthcheckInitParameters) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCHealthcheckInitParameters. +func (in *GRPCHealthcheckInitParameters) DeepCopy() *GRPCHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(GRPCHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCHealthcheckObservation) DeepCopyInto(out *GRPCHealthcheckObservation) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCHealthcheckObservation. 
+func (in *GRPCHealthcheckObservation) DeepCopy() *GRPCHealthcheckObservation { + if in == nil { + return nil + } + out := new(GRPCHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCHealthcheckParameters) DeepCopyInto(out *GRPCHealthcheckParameters) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCHealthcheckParameters. +func (in *GRPCHealthcheckParameters) DeepCopy() *GRPCHealthcheckParameters { + if in == nil { + return nil + } + out := new(GRPCHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCMatchInitParameters) DeepCopyInto(out *GRPCMatchInitParameters) { + *out = *in + if in.Fqmn != nil { + in, out := &in.Fqmn, &out.Fqmn + *out = make([]FqmnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCMatchInitParameters. +func (in *GRPCMatchInitParameters) DeepCopy() *GRPCMatchInitParameters { + if in == nil { + return nil + } + out := new(GRPCMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCMatchObservation) DeepCopyInto(out *GRPCMatchObservation) { + *out = *in + if in.Fqmn != nil { + in, out := &in.Fqmn, &out.Fqmn + *out = make([]FqmnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCMatchObservation. +func (in *GRPCMatchObservation) DeepCopy() *GRPCMatchObservation { + if in == nil { + return nil + } + out := new(GRPCMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCMatchParameters) DeepCopyInto(out *GRPCMatchParameters) { + *out = *in + if in.Fqmn != nil { + in, out := &in.Fqmn, &out.Fqmn + *out = make([]FqmnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCMatchParameters. +func (in *GRPCMatchParameters) DeepCopy() *GRPCMatchParameters { + if in == nil { + return nil + } + out := new(GRPCMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCRouteActionInitParameters) DeepCopyInto(out *GRPCRouteActionInitParameters) { + *out = *in + if in.AutoHostRewrite != nil { + in, out := &in.AutoHostRewrite, &out.AutoHostRewrite + *out = new(bool) + **out = **in + } + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } + if in.BackendGroupIDRef != nil { + in, out := &in.BackendGroupIDRef, &out.BackendGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BackendGroupIDSelector != nil { + in, out := &in.BackendGroupIDSelector, &out.BackendGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostRewrite != nil { + in, out := &in.HostRewrite, &out.HostRewrite + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(string) + **out = **in + } + if in.MaxTimeout != nil { + in, out := &in.MaxTimeout, &out.MaxTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteActionInitParameters. +func (in *GRPCRouteActionInitParameters) DeepCopy() *GRPCRouteActionInitParameters { + if in == nil { + return nil + } + out := new(GRPCRouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteActionObservation) DeepCopyInto(out *GRPCRouteActionObservation) { + *out = *in + if in.AutoHostRewrite != nil { + in, out := &in.AutoHostRewrite, &out.AutoHostRewrite + *out = new(bool) + **out = **in + } + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } + if in.HostRewrite != nil { + in, out := &in.HostRewrite, &out.HostRewrite + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(string) + **out = **in + } + if in.MaxTimeout != nil { + in, out := &in.MaxTimeout, &out.MaxTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteActionObservation. +func (in *GRPCRouteActionObservation) DeepCopy() *GRPCRouteActionObservation { + if in == nil { + return nil + } + out := new(GRPCRouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCRouteActionParameters) DeepCopyInto(out *GRPCRouteActionParameters) { + *out = *in + if in.AutoHostRewrite != nil { + in, out := &in.AutoHostRewrite, &out.AutoHostRewrite + *out = new(bool) + **out = **in + } + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } + if in.BackendGroupIDRef != nil { + in, out := &in.BackendGroupIDRef, &out.BackendGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BackendGroupIDSelector != nil { + in, out := &in.BackendGroupIDSelector, &out.BackendGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostRewrite != nil { + in, out := &in.HostRewrite, &out.HostRewrite + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(string) + **out = **in + } + if in.MaxTimeout != nil { + in, out := &in.MaxTimeout, &out.MaxTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteActionParameters. +func (in *GRPCRouteActionParameters) DeepCopy() *GRPCRouteActionParameters { + if in == nil { + return nil + } + out := new(GRPCRouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteInitParameters) DeepCopyInto(out *GRPCRouteInitParameters) { + *out = *in + if in.GRPCMatch != nil { + in, out := &in.GRPCMatch, &out.GRPCMatch + *out = make([]GRPCMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GRPCRouteAction != nil { + in, out := &in.GRPCRouteAction, &out.GRPCRouteAction + *out = make([]GRPCRouteActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GRPCStatusResponseAction != nil { + in, out := &in.GRPCStatusResponseAction, &out.GRPCStatusResponseAction + *out = make([]GRPCStatusResponseActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteInitParameters. +func (in *GRPCRouteInitParameters) DeepCopy() *GRPCRouteInitParameters { + if in == nil { + return nil + } + out := new(GRPCRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteObservation) DeepCopyInto(out *GRPCRouteObservation) { + *out = *in + if in.GRPCMatch != nil { + in, out := &in.GRPCMatch, &out.GRPCMatch + *out = make([]GRPCMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GRPCRouteAction != nil { + in, out := &in.GRPCRouteAction, &out.GRPCRouteAction + *out = make([]GRPCRouteActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GRPCStatusResponseAction != nil { + in, out := &in.GRPCStatusResponseAction, &out.GRPCStatusResponseAction + *out = make([]GRPCStatusResponseActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteObservation. 
+func (in *GRPCRouteObservation) DeepCopy() *GRPCRouteObservation { + if in == nil { + return nil + } + out := new(GRPCRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteParameters) DeepCopyInto(out *GRPCRouteParameters) { + *out = *in + if in.GRPCMatch != nil { + in, out := &in.GRPCMatch, &out.GRPCMatch + *out = make([]GRPCMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GRPCRouteAction != nil { + in, out := &in.GRPCRouteAction, &out.GRPCRouteAction + *out = make([]GRPCRouteActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GRPCStatusResponseAction != nil { + in, out := &in.GRPCStatusResponseAction, &out.GRPCStatusResponseAction + *out = make([]GRPCStatusResponseActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteParameters. +func (in *GRPCRouteParameters) DeepCopy() *GRPCRouteParameters { + if in == nil { + return nil + } + out := new(GRPCRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCStatusResponseActionInitParameters) DeepCopyInto(out *GRPCStatusResponseActionInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCStatusResponseActionInitParameters. +func (in *GRPCStatusResponseActionInitParameters) DeepCopy() *GRPCStatusResponseActionInitParameters { + if in == nil { + return nil + } + out := new(GRPCStatusResponseActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCStatusResponseActionObservation) DeepCopyInto(out *GRPCStatusResponseActionObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCStatusResponseActionObservation. +func (in *GRPCStatusResponseActionObservation) DeepCopy() *GRPCStatusResponseActionObservation { + if in == nil { + return nil + } + out := new(GRPCStatusResponseActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCStatusResponseActionParameters) DeepCopyInto(out *GRPCStatusResponseActionParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCStatusResponseActionParameters. +func (in *GRPCStatusResponseActionParameters) DeepCopy() *GRPCStatusResponseActionParameters { + if in == nil { + return nil + } + out := new(GRPCStatusResponseActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendHealthcheckInitParameters) DeepCopyInto(out *HTTPBackendHealthcheckInitParameters) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]HealthcheckGRPCHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]HealthcheckHTTPHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]HealthcheckStreamHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendHealthcheckInitParameters. +func (in *HTTPBackendHealthcheckInitParameters) DeepCopy() *HTTPBackendHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HTTPBackendHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendHealthcheckObservation) DeepCopyInto(out *HTTPBackendHealthcheckObservation) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]HealthcheckGRPCHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]HealthcheckHTTPHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]HealthcheckStreamHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendHealthcheckObservation. +func (in *HTTPBackendHealthcheckObservation) DeepCopy() *HTTPBackendHealthcheckObservation { + if in == nil { + return nil + } + out := new(HTTPBackendHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendHealthcheckParameters) DeepCopyInto(out *HTTPBackendHealthcheckParameters) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]HealthcheckGRPCHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]HealthcheckHTTPHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]HealthcheckStreamHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendHealthcheckParameters. +func (in *HTTPBackendHealthcheckParameters) DeepCopy() *HTTPBackendHealthcheckParameters { + if in == nil { + return nil + } + out := new(HTTPBackendHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendInitParameters) DeepCopyInto(out *HTTPBackendInitParameters) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]HTTPBackendHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]HTTPBackendLoadBalancingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.StorageBucket != nil { + in, out := &in.StorageBucket, &out.StorageBucket + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]HTTPBackendTLSInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetGroupIdsRefs != nil { + in, out := &in.TargetGroupIdsRefs, &out.TargetGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIdsSelector != nil { + in, out := &in.TargetGroupIdsSelector, &out.TargetGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendInitParameters. +func (in *HTTPBackendInitParameters) DeepCopy() *HTTPBackendInitParameters { + if in == nil { + return nil + } + out := new(HTTPBackendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBackendLoadBalancingConfigInitParameters) DeepCopyInto(out *HTTPBackendLoadBalancingConfigInitParameters) { + *out = *in + if in.LocalityAwareRoutingPercent != nil { + in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PanicThreshold != nil { + in, out := &in.PanicThreshold, &out.PanicThreshold + *out = new(float64) + **out = **in + } + if in.StrictLocality != nil { + in, out := &in.StrictLocality, &out.StrictLocality + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendLoadBalancingConfigInitParameters. +func (in *HTTPBackendLoadBalancingConfigInitParameters) DeepCopy() *HTTPBackendLoadBalancingConfigInitParameters { + if in == nil { + return nil + } + out := new(HTTPBackendLoadBalancingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendLoadBalancingConfigObservation) DeepCopyInto(out *HTTPBackendLoadBalancingConfigObservation) { + *out = *in + if in.LocalityAwareRoutingPercent != nil { + in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PanicThreshold != nil { + in, out := &in.PanicThreshold, &out.PanicThreshold + *out = new(float64) + **out = **in + } + if in.StrictLocality != nil { + in, out := &in.StrictLocality, &out.StrictLocality + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendLoadBalancingConfigObservation. +func (in *HTTPBackendLoadBalancingConfigObservation) DeepCopy() *HTTPBackendLoadBalancingConfigObservation { + if in == nil { + return nil + } + out := new(HTTPBackendLoadBalancingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBackendLoadBalancingConfigParameters) DeepCopyInto(out *HTTPBackendLoadBalancingConfigParameters) { + *out = *in + if in.LocalityAwareRoutingPercent != nil { + in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PanicThreshold != nil { + in, out := &in.PanicThreshold, &out.PanicThreshold + *out = new(float64) + **out = **in + } + if in.StrictLocality != nil { + in, out := &in.StrictLocality, &out.StrictLocality + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendLoadBalancingConfigParameters. +func (in *HTTPBackendLoadBalancingConfigParameters) DeepCopy() *HTTPBackendLoadBalancingConfigParameters { + if in == nil { + return nil + } + out := new(HTTPBackendLoadBalancingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendObservation) DeepCopyInto(out *HTTPBackendObservation) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]HTTPBackendHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]HTTPBackendLoadBalancingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.StorageBucket != nil { + in, out := &in.StorageBucket, &out.StorageBucket + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]HTTPBackendTLSObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendObservation. +func (in *HTTPBackendObservation) DeepCopy() *HTTPBackendObservation { + if in == nil { + return nil + } + out := new(HTTPBackendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendParameters) DeepCopyInto(out *HTTPBackendParameters) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]HTTPBackendHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]HTTPBackendLoadBalancingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.StorageBucket != nil { + in, out := &in.StorageBucket, &out.StorageBucket + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]HTTPBackendTLSParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetGroupIdsRefs != nil { + in, out := &in.TargetGroupIdsRefs, &out.TargetGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIdsSelector != nil { + in, out := &in.TargetGroupIdsSelector, &out.TargetGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendParameters. +func (in *HTTPBackendParameters) DeepCopy() *HTTPBackendParameters { + if in == nil { + return nil + } + out := new(HTTPBackendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBackendTLSInitParameters) DeepCopyInto(out *HTTPBackendTLSInitParameters) { + *out = *in + if in.Sni != nil { + in, out := &in.Sni, &out.Sni + *out = new(string) + **out = **in + } + if in.ValidationContext != nil { + in, out := &in.ValidationContext, &out.ValidationContext + *out = make([]TLSValidationContextInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendTLSInitParameters. +func (in *HTTPBackendTLSInitParameters) DeepCopy() *HTTPBackendTLSInitParameters { + if in == nil { + return nil + } + out := new(HTTPBackendTLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPBackendTLSObservation) DeepCopyInto(out *HTTPBackendTLSObservation) { + *out = *in + if in.Sni != nil { + in, out := &in.Sni, &out.Sni + *out = new(string) + **out = **in + } + if in.ValidationContext != nil { + in, out := &in.ValidationContext, &out.ValidationContext + *out = make([]TLSValidationContextObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendTLSObservation. +func (in *HTTPBackendTLSObservation) DeepCopy() *HTTPBackendTLSObservation { + if in == nil { + return nil + } + out := new(HTTPBackendTLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBackendTLSParameters) DeepCopyInto(out *HTTPBackendTLSParameters) { + *out = *in + if in.Sni != nil { + in, out := &in.Sni, &out.Sni + *out = new(string) + **out = **in + } + if in.ValidationContext != nil { + in, out := &in.ValidationContext, &out.ValidationContext + *out = make([]TLSValidationContextParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendTLSParameters. +func (in *HTTPBackendTLSParameters) DeepCopy() *HTTPBackendTLSParameters { + if in == nil { + return nil + } + out := new(HTTPBackendTLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHandlerHttp2OptionsInitParameters) DeepCopyInto(out *HTTPHandlerHttp2OptionsInitParameters) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHandlerHttp2OptionsInitParameters. +func (in *HTTPHandlerHttp2OptionsInitParameters) DeepCopy() *HTTPHandlerHttp2OptionsInitParameters { + if in == nil { + return nil + } + out := new(HTTPHandlerHttp2OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHandlerHttp2OptionsObservation) DeepCopyInto(out *HTTPHandlerHttp2OptionsObservation) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHandlerHttp2OptionsObservation. +func (in *HTTPHandlerHttp2OptionsObservation) DeepCopy() *HTTPHandlerHttp2OptionsObservation { + if in == nil { + return nil + } + out := new(HTTPHandlerHttp2OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHandlerHttp2OptionsParameters) DeepCopyInto(out *HTTPHandlerHttp2OptionsParameters) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHandlerHttp2OptionsParameters. 
+func (in *HTTPHandlerHttp2OptionsParameters) DeepCopy() *HTTPHandlerHttp2OptionsParameters { + if in == nil { + return nil + } + out := new(HTTPHandlerHttp2OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHandlerInitParameters) DeepCopyInto(out *HTTPHandlerInitParameters) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]HTTPHandlerHttp2OptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHandlerInitParameters. +func (in *HTTPHandlerInitParameters) DeepCopy() *HTTPHandlerInitParameters { + if in == nil { + return nil + } + out := new(HTTPHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHandlerObservation) DeepCopyInto(out *HTTPHandlerObservation) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]HTTPHandlerHttp2OptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHandlerObservation. +func (in *HTTPHandlerObservation) DeepCopy() *HTTPHandlerObservation { + if in == nil { + return nil + } + out := new(HTTPHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHandlerParameters) DeepCopyInto(out *HTTPHandlerParameters) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]HTTPHandlerHttp2OptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHandlerParameters. 
+func (in *HTTPHandlerParameters) DeepCopy() *HTTPHandlerParameters { + if in == nil { + return nil + } + out := new(HTTPHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHealthcheckInitParameters) DeepCopyInto(out *HTTPHealthcheckInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHealthcheckInitParameters. +func (in *HTTPHealthcheckInitParameters) DeepCopy() *HTTPHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HTTPHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHealthcheckObservation) DeepCopyInto(out *HTTPHealthcheckObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHealthcheckObservation. +func (in *HTTPHealthcheckObservation) DeepCopy() *HTTPHealthcheckObservation { + if in == nil { + return nil + } + out := new(HTTPHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHealthcheckParameters) DeepCopyInto(out *HTTPHealthcheckParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHealthcheckParameters. +func (in *HTTPHealthcheckParameters) DeepCopy() *HTTPHealthcheckParameters { + if in == nil { + return nil + } + out := new(HTTPHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPInitParameters) DeepCopyInto(out *HTTPInitParameters) { + *out = *in + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make([]HandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Redirects != nil { + in, out := &in.Redirects, &out.Redirects + *out = make([]RedirectsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPInitParameters. 
+func (in *HTTPInitParameters) DeepCopy() *HTTPInitParameters { + if in == nil { + return nil + } + out := new(HTTPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPMatchInitParameters) DeepCopyInto(out *HTTPMatchInitParameters) { + *out = *in + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]PathInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatchInitParameters. +func (in *HTTPMatchInitParameters) DeepCopy() *HTTPMatchInitParameters { + if in == nil { + return nil + } + out := new(HTTPMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPMatchObservation) DeepCopyInto(out *HTTPMatchObservation) { + *out = *in + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]PathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatchObservation. +func (in *HTTPMatchObservation) DeepCopy() *HTTPMatchObservation { + if in == nil { + return nil + } + out := new(HTTPMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPMatchParameters) DeepCopyInto(out *HTTPMatchParameters) { + *out = *in + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]PathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatchParameters. +func (in *HTTPMatchParameters) DeepCopy() *HTTPMatchParameters { + if in == nil { + return nil + } + out := new(HTTPMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPObservation) DeepCopyInto(out *HTTPObservation) { + *out = *in + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make([]HandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Redirects != nil { + in, out := &in.Redirects, &out.Redirects + *out = make([]RedirectsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPObservation. 
+func (in *HTTPObservation) DeepCopy() *HTTPObservation { + if in == nil { + return nil + } + out := new(HTTPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPParameters) DeepCopyInto(out *HTTPParameters) { + *out = *in + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make([]HandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Redirects != nil { + in, out := &in.Redirects, &out.Redirects + *out = make([]RedirectsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPParameters. +func (in *HTTPParameters) DeepCopy() *HTTPParameters { + if in == nil { + return nil + } + out := new(HTTPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteActionInitParameters) DeepCopyInto(out *HTTPRouteActionInitParameters) { + *out = *in + if in.AutoHostRewrite != nil { + in, out := &in.AutoHostRewrite, &out.AutoHostRewrite + *out = new(bool) + **out = **in + } + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } + if in.BackendGroupIDRef != nil { + in, out := &in.BackendGroupIDRef, &out.BackendGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BackendGroupIDSelector != nil { + in, out := &in.BackendGroupIDSelector, &out.BackendGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostRewrite != nil { + in, out := &in.HostRewrite, &out.HostRewrite + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(string) + **out = **in + } + if in.PrefixRewrite != nil { + in, out := &in.PrefixRewrite, &out.PrefixRewrite + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UpgradeTypes != nil { + in, out := &in.UpgradeTypes, &out.UpgradeTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionInitParameters. +func (in *HTTPRouteActionInitParameters) DeepCopy() *HTTPRouteActionInitParameters { + if in == nil { + return nil + } + out := new(HTTPRouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteActionObservation) DeepCopyInto(out *HTTPRouteActionObservation) { + *out = *in + if in.AutoHostRewrite != nil { + in, out := &in.AutoHostRewrite, &out.AutoHostRewrite + *out = new(bool) + **out = **in + } + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } + if in.HostRewrite != nil { + in, out := &in.HostRewrite, &out.HostRewrite + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(string) + **out = **in + } + if in.PrefixRewrite != nil { + in, out := &in.PrefixRewrite, &out.PrefixRewrite + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UpgradeTypes != nil { + in, out := &in.UpgradeTypes, &out.UpgradeTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionObservation. +func (in *HTTPRouteActionObservation) DeepCopy() *HTTPRouteActionObservation { + if in == nil { + return nil + } + out := new(HTTPRouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteActionParameters) DeepCopyInto(out *HTTPRouteActionParameters) { + *out = *in + if in.AutoHostRewrite != nil { + in, out := &in.AutoHostRewrite, &out.AutoHostRewrite + *out = new(bool) + **out = **in + } + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } + if in.BackendGroupIDRef != nil { + in, out := &in.BackendGroupIDRef, &out.BackendGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BackendGroupIDSelector != nil { + in, out := &in.BackendGroupIDSelector, &out.BackendGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostRewrite != nil { + in, out := &in.HostRewrite, &out.HostRewrite + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(string) + **out = **in + } + if in.PrefixRewrite != nil { + in, out := &in.PrefixRewrite, &out.PrefixRewrite + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UpgradeTypes != nil { + in, out := &in.UpgradeTypes, &out.UpgradeTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionParameters. +func (in *HTTPRouteActionParameters) DeepCopy() *HTTPRouteActionParameters { + if in == nil { + return nil + } + out := new(HTTPRouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteInitParameters) DeepCopyInto(out *HTTPRouteInitParameters) { + *out = *in + if in.DirectResponseAction != nil { + in, out := &in.DirectResponseAction, &out.DirectResponseAction + *out = make([]DirectResponseActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMatch != nil { + in, out := &in.HTTPMatch, &out.HTTPMatch + *out = make([]HTTPMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRouteAction != nil { + in, out := &in.HTTPRouteAction, &out.HTTPRouteAction + *out = make([]HTTPRouteActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedirectAction != nil { + in, out := &in.RedirectAction, &out.RedirectAction + *out = make([]RedirectActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteInitParameters. +func (in *HTTPRouteInitParameters) DeepCopy() *HTTPRouteInitParameters { + if in == nil { + return nil + } + out := new(HTTPRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteObservation) DeepCopyInto(out *HTTPRouteObservation) { + *out = *in + if in.DirectResponseAction != nil { + in, out := &in.DirectResponseAction, &out.DirectResponseAction + *out = make([]DirectResponseActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMatch != nil { + in, out := &in.HTTPMatch, &out.HTTPMatch + *out = make([]HTTPMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRouteAction != nil { + in, out := &in.HTTPRouteAction, &out.HTTPRouteAction + *out = make([]HTTPRouteActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedirectAction != nil { + in, out := &in.RedirectAction, &out.RedirectAction + *out = make([]RedirectActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteObservation. +func (in *HTTPRouteObservation) DeepCopy() *HTTPRouteObservation { + if in == nil { + return nil + } + out := new(HTTPRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteParameters) DeepCopyInto(out *HTTPRouteParameters) { + *out = *in + if in.DirectResponseAction != nil { + in, out := &in.DirectResponseAction, &out.DirectResponseAction + *out = make([]DirectResponseActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMatch != nil { + in, out := &in.HTTPMatch, &out.HTTPMatch + *out = make([]HTTPMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRouteAction != nil { + in, out := &in.HTTPRouteAction, &out.HTTPRouteAction + *out = make([]HTTPRouteActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedirectAction != nil { + in, out := &in.RedirectAction, &out.RedirectAction + *out = make([]RedirectActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteParameters. +func (in *HTTPRouteParameters) DeepCopy() *HTTPRouteParameters { + if in == nil { + return nil + } + out := new(HTTPRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouter) DeepCopyInto(out *HTTPRouter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouter. +func (in *HTTPRouter) DeepCopy() *HTTPRouter { + if in == nil { + return nil + } + out := new(HTTPRouter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTTPRouter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouterInitParameters) DeepCopyInto(out *HTTPRouterInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RouteOptions != nil { + in, out := &in.RouteOptions, &out.RouteOptions + *out = make([]RouteOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouterInitParameters. 
+func (in *HTTPRouterInitParameters) DeepCopy() *HTTPRouterInitParameters { + if in == nil { + return nil + } + out := new(HTTPRouterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouterList) DeepCopyInto(out *HTTPRouterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HTTPRouter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouterList. +func (in *HTTPRouterList) DeepCopy() *HTTPRouterList { + if in == nil { + return nil + } + out := new(HTTPRouterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTTPRouterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouterObservation) DeepCopyInto(out *HTTPRouterObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RouteOptions != nil { + in, out := &in.RouteOptions, &out.RouteOptions + *out = make([]RouteOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouterObservation. +func (in *HTTPRouterObservation) DeepCopy() *HTTPRouterObservation { + if in == nil { + return nil + } + out := new(HTTPRouterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouterParameters) DeepCopyInto(out *HTTPRouterParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RouteOptions != nil { + in, out := &in.RouteOptions, &out.RouteOptions + *out = make([]RouteOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouterParameters. +func (in *HTTPRouterParameters) DeepCopy() *HTTPRouterParameters { + if in == nil { + return nil + } + out := new(HTTPRouterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouterSpec) DeepCopyInto(out *HTTPRouterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouterSpec. +func (in *HTTPRouterSpec) DeepCopy() *HTTPRouterSpec { + if in == nil { + return nil + } + out := new(HTTPRouterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouterStatus) DeepCopyInto(out *HTTPRouterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouterStatus. +func (in *HTTPRouterStatus) DeepCopy() *HTTPRouterStatus { + if in == nil { + return nil + } + out := new(HTTPRouterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerHTTPHandlerHttp2OptionsInitParameters) DeepCopyInto(out *HandlerHTTPHandlerHttp2OptionsInitParameters) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerHTTPHandlerHttp2OptionsInitParameters. 
+func (in *HandlerHTTPHandlerHttp2OptionsInitParameters) DeepCopy() *HandlerHTTPHandlerHttp2OptionsInitParameters { + if in == nil { + return nil + } + out := new(HandlerHTTPHandlerHttp2OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerHTTPHandlerHttp2OptionsObservation) DeepCopyInto(out *HandlerHTTPHandlerHttp2OptionsObservation) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerHTTPHandlerHttp2OptionsObservation. +func (in *HandlerHTTPHandlerHttp2OptionsObservation) DeepCopy() *HandlerHTTPHandlerHttp2OptionsObservation { + if in == nil { + return nil + } + out := new(HandlerHTTPHandlerHttp2OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerHTTPHandlerHttp2OptionsParameters) DeepCopyInto(out *HandlerHTTPHandlerHttp2OptionsParameters) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerHTTPHandlerHttp2OptionsParameters. +func (in *HandlerHTTPHandlerHttp2OptionsParameters) DeepCopy() *HandlerHTTPHandlerHttp2OptionsParameters { + if in == nil { + return nil + } + out := new(HandlerHTTPHandlerHttp2OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerHTTPHandlerInitParameters) DeepCopyInto(out *HandlerHTTPHandlerInitParameters) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]HandlerHTTPHandlerHttp2OptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerHTTPHandlerInitParameters. +func (in *HandlerHTTPHandlerInitParameters) DeepCopy() *HandlerHTTPHandlerInitParameters { + if in == nil { + return nil + } + out := new(HandlerHTTPHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HandlerHTTPHandlerObservation) DeepCopyInto(out *HandlerHTTPHandlerObservation) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]HandlerHTTPHandlerHttp2OptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerHTTPHandlerObservation. +func (in *HandlerHTTPHandlerObservation) DeepCopy() *HandlerHTTPHandlerObservation { + if in == nil { + return nil + } + out := new(HandlerHTTPHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerHTTPHandlerParameters) DeepCopyInto(out *HandlerHTTPHandlerParameters) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]HandlerHTTPHandlerHttp2OptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerHTTPHandlerParameters. +func (in *HandlerHTTPHandlerParameters) DeepCopy() *HandlerHTTPHandlerParameters { + if in == nil { + return nil + } + out := new(HandlerHTTPHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerInitParameters) DeepCopyInto(out *HandlerInitParameters) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.HTTPRouterIDRef != nil { + in, out := &in.HTTPRouterIDRef, &out.HTTPRouterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouterIDSelector != nil { + in, out := &in.HTTPRouterIDSelector, &out.HTTPRouterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]Http2OptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerInitParameters. 
+func (in *HandlerInitParameters) DeepCopy() *HandlerInitParameters { + if in == nil { + return nil + } + out := new(HandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerObservation) DeepCopyInto(out *HandlerObservation) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]Http2OptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerObservation. +func (in *HandlerObservation) DeepCopy() *HandlerObservation { + if in == nil { + return nil + } + out := new(HandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerParameters) DeepCopyInto(out *HandlerParameters) { + *out = *in + if in.AllowHttp10 != nil { + in, out := &in.AllowHttp10, &out.AllowHttp10 + *out = new(bool) + **out = **in + } + if in.HTTPRouterID != nil { + in, out := &in.HTTPRouterID, &out.HTTPRouterID + *out = new(string) + **out = **in + } + if in.HTTPRouterIDRef != nil { + in, out := &in.HTTPRouterIDRef, &out.HTTPRouterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouterIDSelector != nil { + in, out := &in.HTTPRouterIDSelector, &out.HTTPRouterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Http2Options != nil { + in, out := &in.Http2Options, &out.Http2Options + *out = make([]Http2OptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRequestID != nil { + in, out := &in.RewriteRequestID, &out.RewriteRequestID + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerParameters. +func (in *HandlerParameters) DeepCopy() *HandlerParameters { + if in == nil { + return nil + } + out := new(HandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerStreamHandlerInitParameters) DeepCopyInto(out *HandlerStreamHandlerInitParameters) { + *out = *in + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerStreamHandlerInitParameters. +func (in *HandlerStreamHandlerInitParameters) DeepCopy() *HandlerStreamHandlerInitParameters { + if in == nil { + return nil + } + out := new(HandlerStreamHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HandlerStreamHandlerObservation) DeepCopyInto(out *HandlerStreamHandlerObservation) { + *out = *in + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerStreamHandlerObservation. +func (in *HandlerStreamHandlerObservation) DeepCopy() *HandlerStreamHandlerObservation { + if in == nil { + return nil + } + out := new(HandlerStreamHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HandlerStreamHandlerParameters) DeepCopyInto(out *HandlerStreamHandlerParameters) { + *out = *in + if in.BackendGroupID != nil { + in, out := &in.BackendGroupID, &out.BackendGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HandlerStreamHandlerParameters. +func (in *HandlerStreamHandlerParameters) DeepCopy() *HandlerStreamHandlerParameters { + if in == nil { + return nil + } + out := new(HandlerStreamHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderValueInitParameters) DeepCopyInto(out *HeaderValueInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderValueInitParameters. +func (in *HeaderValueInitParameters) DeepCopy() *HeaderValueInitParameters { + if in == nil { + return nil + } + out := new(HeaderValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderValueObservation) DeepCopyInto(out *HeaderValueObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderValueObservation. +func (in *HeaderValueObservation) DeepCopy() *HeaderValueObservation { + if in == nil { + return nil + } + out := new(HeaderValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderValueParameters) DeepCopyInto(out *HeaderValueParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderValueParameters. +func (in *HeaderValueParameters) DeepCopy() *HeaderValueParameters { + if in == nil { + return nil + } + out := new(HeaderValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckGRPCHealthcheckInitParameters) DeepCopyInto(out *HealthcheckGRPCHealthcheckInitParameters) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckGRPCHealthcheckInitParameters. +func (in *HealthcheckGRPCHealthcheckInitParameters) DeepCopy() *HealthcheckGRPCHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HealthcheckGRPCHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckGRPCHealthcheckObservation) DeepCopyInto(out *HealthcheckGRPCHealthcheckObservation) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckGRPCHealthcheckObservation. 
+func (in *HealthcheckGRPCHealthcheckObservation) DeepCopy() *HealthcheckGRPCHealthcheckObservation { + if in == nil { + return nil + } + out := new(HealthcheckGRPCHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckGRPCHealthcheckParameters) DeepCopyInto(out *HealthcheckGRPCHealthcheckParameters) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckGRPCHealthcheckParameters. +func (in *HealthcheckGRPCHealthcheckParameters) DeepCopy() *HealthcheckGRPCHealthcheckParameters { + if in == nil { + return nil + } + out := new(HealthcheckGRPCHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckHTTPHealthcheckInitParameters) DeepCopyInto(out *HealthcheckHTTPHealthcheckInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckHTTPHealthcheckInitParameters. +func (in *HealthcheckHTTPHealthcheckInitParameters) DeepCopy() *HealthcheckHTTPHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HealthcheckHTTPHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckHTTPHealthcheckObservation) DeepCopyInto(out *HealthcheckHTTPHealthcheckObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckHTTPHealthcheckObservation. +func (in *HealthcheckHTTPHealthcheckObservation) DeepCopy() *HealthcheckHTTPHealthcheckObservation { + if in == nil { + return nil + } + out := new(HealthcheckHTTPHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckHTTPHealthcheckParameters) DeepCopyInto(out *HealthcheckHTTPHealthcheckParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckHTTPHealthcheckParameters. 
+func (in *HealthcheckHTTPHealthcheckParameters) DeepCopy() *HealthcheckHTTPHealthcheckParameters { + if in == nil { + return nil + } + out := new(HealthcheckHTTPHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckInitParameters) DeepCopyInto(out *HealthcheckInitParameters) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]GRPCHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]HTTPHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]StreamHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckInitParameters. +func (in *HealthcheckInitParameters) DeepCopy() *HealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcheckObservation) DeepCopyInto(out *HealthcheckObservation) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]GRPCHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]HTTPHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]StreamHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckObservation. +func (in *HealthcheckObservation) DeepCopy() *HealthcheckObservation { + if in == nil { + return nil + } + out := new(HealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckParameters) DeepCopyInto(out *HealthcheckParameters) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]GRPCHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]HTTPHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]StreamHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckParameters. 
+func (in *HealthcheckParameters) DeepCopy() *HealthcheckParameters { + if in == nil { + return nil + } + out := new(HealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckStreamHealthcheckInitParameters) DeepCopyInto(out *HealthcheckStreamHealthcheckInitParameters) { + *out = *in + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(string) + **out = **in + } + if in.Send != nil { + in, out := &in.Send, &out.Send + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckStreamHealthcheckInitParameters. +func (in *HealthcheckStreamHealthcheckInitParameters) DeepCopy() *HealthcheckStreamHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HealthcheckStreamHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckStreamHealthcheckObservation) DeepCopyInto(out *HealthcheckStreamHealthcheckObservation) { + *out = *in + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(string) + **out = **in + } + if in.Send != nil { + in, out := &in.Send, &out.Send + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckStreamHealthcheckObservation. +func (in *HealthcheckStreamHealthcheckObservation) DeepCopy() *HealthcheckStreamHealthcheckObservation { + if in == nil { + return nil + } + out := new(HealthcheckStreamHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckStreamHealthcheckParameters) DeepCopyInto(out *HealthcheckStreamHealthcheckParameters) { + *out = *in + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(string) + **out = **in + } + if in.Send != nil { + in, out := &in.Send, &out.Send + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckStreamHealthcheckParameters. +func (in *HealthcheckStreamHealthcheckParameters) DeepCopy() *HealthcheckStreamHealthcheckParameters { + if in == nil { + return nil + } + out := new(HealthcheckStreamHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2OptionsInitParameters) DeepCopyInto(out *Http2OptionsInitParameters) { + *out = *in + if in.MaxConcurrentStreams != nil { + in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2OptionsInitParameters. +func (in *Http2OptionsInitParameters) DeepCopy() *Http2OptionsInitParameters { + if in == nil { + return nil + } + out := new(Http2OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2OptionsObservation) DeepCopyInto(out *Http2OptionsObservation) {
+	*out = *in
+	if in.MaxConcurrentStreams != nil {
+		in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2OptionsObservation.
+func (in *Http2OptionsObservation) DeepCopy() *Http2OptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(Http2OptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Http2OptionsParameters) DeepCopyInto(out *Http2OptionsParameters) {
+	*out = *in
+	if in.MaxConcurrentStreams != nil {
+		in, out := &in.MaxConcurrentStreams, &out.MaxConcurrentStreams
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2OptionsParameters.
+func (in *Http2OptionsParameters) DeepCopy() *Http2OptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(Http2OptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InternalIPv4AddressInitParameters) DeepCopyInto(out *InternalIPv4AddressInitParameters) {
+	*out = *in
+	if in.Address != nil {
+		in, out := &in.Address, &out.Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalIPv4AddressInitParameters.
+func (in *InternalIPv4AddressInitParameters) DeepCopy() *InternalIPv4AddressInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InternalIPv4AddressInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InternalIPv4AddressObservation) DeepCopyInto(out *InternalIPv4AddressObservation) {
+	*out = *in
+	if in.Address != nil {
+		in, out := &in.Address, &out.Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalIPv4AddressObservation.
+func (in *InternalIPv4AddressObservation) DeepCopy() *InternalIPv4AddressObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InternalIPv4AddressObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InternalIPv4AddressParameters) DeepCopyInto(out *InternalIPv4AddressParameters) {
+	*out = *in
+	if in.Address != nil {
+		in, out := &in.Address, &out.Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalIPv4AddressParameters.
+func (in *InternalIPv4AddressParameters) DeepCopy() *InternalIPv4AddressParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InternalIPv4AddressParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListenerInitParameters) DeepCopyInto(out *ListenerInitParameters) {
+	*out = *in
+	if in.Endpoint != nil {
+		in, out := &in.Endpoint, &out.Endpoint
+		*out = make([]EndpointInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTP != nil {
+		in, out := &in.HTTP, &out.HTTP
+		*out = make([]HTTPInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Stream != nil {
+		in, out := &in.Stream, &out.Stream
+		*out = make([]StreamInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = make([]ListenerTLSInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerInitParameters.
+func (in *ListenerInitParameters) DeepCopy() *ListenerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ListenerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListenerObservation) DeepCopyInto(out *ListenerObservation) {
+	*out = *in
+	if in.Endpoint != nil {
+		in, out := &in.Endpoint, &out.Endpoint
+		*out = make([]EndpointObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTP != nil {
+		in, out := &in.HTTP, &out.HTTP
+		*out = make([]HTTPObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Stream != nil {
+		in, out := &in.Stream, &out.Stream
+		*out = make([]StreamObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = make([]ListenerTLSObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerObservation.
+func (in *ListenerObservation) DeepCopy() *ListenerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ListenerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListenerParameters) DeepCopyInto(out *ListenerParameters) {
+	*out = *in
+	if in.Endpoint != nil {
+		in, out := &in.Endpoint, &out.Endpoint
+		*out = make([]EndpointParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTP != nil {
+		in, out := &in.HTTP, &out.HTTP
+		*out = make([]HTTPParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Stream != nil {
+		in, out := &in.Stream, &out.Stream
+		*out = make([]StreamParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = make([]ListenerTLSParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerParameters.
+func (in *ListenerParameters) DeepCopy() *ListenerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ListenerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListenerTLSInitParameters) DeepCopyInto(out *ListenerTLSInitParameters) {
+	*out = *in
+	if in.DefaultHandler != nil {
+		in, out := &in.DefaultHandler, &out.DefaultHandler
+		*out = make([]DefaultHandlerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SniHandler != nil {
+		in, out := &in.SniHandler, &out.SniHandler
+		*out = make([]SniHandlerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSInitParameters.
+func (in *ListenerTLSInitParameters) DeepCopy() *ListenerTLSInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ListenerTLSInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListenerTLSObservation) DeepCopyInto(out *ListenerTLSObservation) {
+	*out = *in
+	if in.DefaultHandler != nil {
+		in, out := &in.DefaultHandler, &out.DefaultHandler
+		*out = make([]DefaultHandlerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SniHandler != nil {
+		in, out := &in.SniHandler, &out.SniHandler
+		*out = make([]SniHandlerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSObservation.
+func (in *ListenerTLSObservation) DeepCopy() *ListenerTLSObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ListenerTLSObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListenerTLSParameters) DeepCopyInto(out *ListenerTLSParameters) {
+	*out = *in
+	if in.DefaultHandler != nil {
+		in, out := &in.DefaultHandler, &out.DefaultHandler
+		*out = make([]DefaultHandlerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SniHandler != nil {
+		in, out := &in.SniHandler, &out.SniHandler
+		*out = make([]SniHandlerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSParameters.
+func (in *ListenerTLSParameters) DeepCopy() *ListenerTLSParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ListenerTLSParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer.
+func (in *LoadBalancer) DeepCopy() *LoadBalancer {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LoadBalancer) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerInitParameters) DeepCopyInto(out *LoadBalancerInitParameters) {
+	*out = *in
+	if in.AllocationPolicy != nil {
+		in, out := &in.AllocationPolicy, &out.AllocationPolicy
+		*out = make([]AllocationPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Listener != nil {
+		in, out := &in.Listener, &out.Listener
+		*out = make([]ListenerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]LogOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RegionID != nil {
+		in, out := &in.RegionID, &out.RegionID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerInitParameters.
+func (in *LoadBalancerInitParameters) DeepCopy() *LoadBalancerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerList) DeepCopyInto(out *LoadBalancerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]LoadBalancer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerList.
+func (in *LoadBalancerList) DeepCopy() *LoadBalancerList {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LoadBalancerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerObservation) DeepCopyInto(out *LoadBalancerObservation) {
+	*out = *in
+	if in.AllocationPolicy != nil {
+		in, out := &in.AllocationPolicy, &out.AllocationPolicy
+		*out = make([]AllocationPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Listener != nil {
+		in, out := &in.Listener, &out.Listener
+		*out = make([]ListenerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]LogOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.RegionID != nil {
+		in, out := &in.RegionID, &out.RegionID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerObservation.
+func (in *LoadBalancerObservation) DeepCopy() *LoadBalancerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerParameters) DeepCopyInto(out *LoadBalancerParameters) {
+	*out = *in
+	if in.AllocationPolicy != nil {
+		in, out := &in.AllocationPolicy, &out.AllocationPolicy
+		*out = make([]AllocationPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Listener != nil {
+		in, out := &in.Listener, &out.Listener
+		*out = make([]ListenerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]LogOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RegionID != nil {
+		in, out := &in.RegionID, &out.RegionID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerParameters.
+func (in *LoadBalancerParameters) DeepCopy() *LoadBalancerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerSpec) DeepCopyInto(out *LoadBalancerSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSpec.
+func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancingConfigInitParameters) DeepCopyInto(out *LoadBalancingConfigInitParameters) {
+	*out = *in
+	if in.LocalityAwareRoutingPercent != nil {
+		in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.PanicThreshold != nil {
+		in, out := &in.PanicThreshold, &out.PanicThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StrictLocality != nil {
+		in, out := &in.StrictLocality, &out.StrictLocality
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancingConfigInitParameters.
+func (in *LoadBalancingConfigInitParameters) DeepCopy() *LoadBalancingConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancingConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancingConfigObservation) DeepCopyInto(out *LoadBalancingConfigObservation) {
+	*out = *in
+	if in.LocalityAwareRoutingPercent != nil {
+		in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.PanicThreshold != nil {
+		in, out := &in.PanicThreshold, &out.PanicThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StrictLocality != nil {
+		in, out := &in.StrictLocality, &out.StrictLocality
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancingConfigObservation.
+func (in *LoadBalancingConfigObservation) DeepCopy() *LoadBalancingConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancingConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancingConfigParameters) DeepCopyInto(out *LoadBalancingConfigParameters) {
+	*out = *in
+	if in.LocalityAwareRoutingPercent != nil {
+		in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.PanicThreshold != nil {
+		in, out := &in.PanicThreshold, &out.PanicThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StrictLocality != nil {
+		in, out := &in.StrictLocality, &out.StrictLocality
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancingConfigParameters.
+func (in *LoadBalancingConfigParameters) DeepCopy() *LoadBalancingConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancingConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocationInitParameters) DeepCopyInto(out *LocationInitParameters) {
+	*out = *in
+	if in.DisableTraffic != nil {
+		in, out := &in.DisableTraffic, &out.DisableTraffic
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationInitParameters.
+func (in *LocationInitParameters) DeepCopy() *LocationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LocationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocationObservation) DeepCopyInto(out *LocationObservation) {
+	*out = *in
+	if in.DisableTraffic != nil {
+		in, out := &in.DisableTraffic, &out.DisableTraffic
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationObservation.
+func (in *LocationObservation) DeepCopy() *LocationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LocationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocationParameters) DeepCopyInto(out *LocationParameters) {
+	*out = *in
+	if in.DisableTraffic != nil {
+		in, out := &in.DisableTraffic, &out.DisableTraffic
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationParameters.
+func (in *LocationParameters) DeepCopy() *LocationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LocationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) {
+	*out = *in
+	if in.Disable != nil {
+		in, out := &in.Disable, &out.Disable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DiscardRule != nil {
+		in, out := &in.DiscardRule, &out.DiscardRule
+		*out = make([]DiscardRuleInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters.
+func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LogOptionsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) {
+	*out = *in
+	if in.Disable != nil {
+		in, out := &in.Disable, &out.Disable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DiscardRule != nil {
+		in, out := &in.DiscardRule, &out.DiscardRule
+		*out = make([]DiscardRuleObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation.
+func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LogOptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) {
+	*out = *in
+	if in.Disable != nil {
+		in, out := &in.Disable, &out.Disable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DiscardRule != nil {
+		in, out := &in.DiscardRule, &out.DiscardRule
+		*out = make([]DiscardRuleParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters.
+func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LogOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyRequestHeadersInitParameters) DeepCopyInto(out *ModifyRequestHeadersInitParameters) {
+	*out = *in
+	if in.Append != nil {
+		in, out := &in.Append, &out.Append
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Remove != nil {
+		in, out := &in.Remove, &out.Remove
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyRequestHeadersInitParameters.
+func (in *ModifyRequestHeadersInitParameters) DeepCopy() *ModifyRequestHeadersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyRequestHeadersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyRequestHeadersObservation) DeepCopyInto(out *ModifyRequestHeadersObservation) {
+	*out = *in
+	if in.Append != nil {
+		in, out := &in.Append, &out.Append
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Remove != nil {
+		in, out := &in.Remove, &out.Remove
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyRequestHeadersObservation.
+func (in *ModifyRequestHeadersObservation) DeepCopy() *ModifyRequestHeadersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyRequestHeadersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyRequestHeadersParameters) DeepCopyInto(out *ModifyRequestHeadersParameters) {
+	*out = *in
+	if in.Append != nil {
+		in, out := &in.Append, &out.Append
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Remove != nil {
+		in, out := &in.Remove, &out.Remove
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyRequestHeadersParameters.
+func (in *ModifyRequestHeadersParameters) DeepCopy() *ModifyRequestHeadersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyRequestHeadersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyResponseHeadersInitParameters) DeepCopyInto(out *ModifyResponseHeadersInitParameters) {
+	*out = *in
+	if in.Append != nil {
+		in, out := &in.Append, &out.Append
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Remove != nil {
+		in, out := &in.Remove, &out.Remove
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyResponseHeadersInitParameters.
+func (in *ModifyResponseHeadersInitParameters) DeepCopy() *ModifyResponseHeadersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyResponseHeadersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyResponseHeadersObservation) DeepCopyInto(out *ModifyResponseHeadersObservation) {
+	*out = *in
+	if in.Append != nil {
+		in, out := &in.Append, &out.Append
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Remove != nil {
+		in, out := &in.Remove, &out.Remove
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyResponseHeadersObservation.
+func (in *ModifyResponseHeadersObservation) DeepCopy() *ModifyResponseHeadersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyResponseHeadersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyResponseHeadersParameters) DeepCopyInto(out *ModifyResponseHeadersParameters) {
+	*out = *in
+	if in.Append != nil {
+		in, out := &in.Append, &out.Append
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Remove != nil {
+		in, out := &in.Remove, &out.Remove
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyResponseHeadersParameters.
+func (in *ModifyResponseHeadersParameters) DeepCopy() *ModifyResponseHeadersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyResponseHeadersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathInitParameters) DeepCopyInto(out *PathInitParameters) {
+	*out = *in
+	if in.Exact != nil {
+		in, out := &in.Exact, &out.Exact
+		*out = new(string)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regex != nil {
+		in, out := &in.Regex, &out.Regex
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathInitParameters.
+func (in *PathInitParameters) DeepCopy() *PathInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PathInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathObservation) DeepCopyInto(out *PathObservation) {
+	*out = *in
+	if in.Exact != nil {
+		in, out := &in.Exact, &out.Exact
+		*out = new(string)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regex != nil {
+		in, out := &in.Regex, &out.Regex
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathObservation.
+func (in *PathObservation) DeepCopy() *PathObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PathObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathParameters) DeepCopyInto(out *PathParameters) {
+	*out = *in
+	if in.Exact != nil {
+		in, out := &in.Exact, &out.Exact
+		*out = new(string)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regex != nil {
+		in, out := &in.Regex, &out.Regex
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathParameters.
+func (in *PathParameters) DeepCopy() *PathParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PathParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsAndPrincipalsHeaderInitParameters) DeepCopyInto(out *PrincipalsAndPrincipalsHeaderInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]HeaderValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsAndPrincipalsHeaderInitParameters.
+func (in *PrincipalsAndPrincipalsHeaderInitParameters) DeepCopy() *PrincipalsAndPrincipalsHeaderInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsAndPrincipalsHeaderInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsAndPrincipalsHeaderObservation) DeepCopyInto(out *PrincipalsAndPrincipalsHeaderObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]HeaderValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsAndPrincipalsHeaderObservation.
+func (in *PrincipalsAndPrincipalsHeaderObservation) DeepCopy() *PrincipalsAndPrincipalsHeaderObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsAndPrincipalsHeaderObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsAndPrincipalsHeaderParameters) DeepCopyInto(out *PrincipalsAndPrincipalsHeaderParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]HeaderValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsAndPrincipalsHeaderParameters.
+func (in *PrincipalsAndPrincipalsHeaderParameters) DeepCopy() *PrincipalsAndPrincipalsHeaderParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsAndPrincipalsHeaderParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsAndPrincipalsInitParameters) DeepCopyInto(out *PrincipalsAndPrincipalsInitParameters) {
+	*out = *in
+	if in.Any != nil {
+		in, out := &in.Any, &out.Any
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make([]PrincipalsAndPrincipalsHeaderInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteIP != nil {
+		in, out := &in.RemoteIP, &out.RemoteIP
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsAndPrincipalsInitParameters.
+func (in *PrincipalsAndPrincipalsInitParameters) DeepCopy() *PrincipalsAndPrincipalsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsAndPrincipalsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsAndPrincipalsObservation) DeepCopyInto(out *PrincipalsAndPrincipalsObservation) {
+	*out = *in
+	if in.Any != nil {
+		in, out := &in.Any, &out.Any
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make([]PrincipalsAndPrincipalsHeaderObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteIP != nil {
+		in, out := &in.RemoteIP, &out.RemoteIP
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsAndPrincipalsObservation.
+func (in *PrincipalsAndPrincipalsObservation) DeepCopy() *PrincipalsAndPrincipalsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsAndPrincipalsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsAndPrincipalsParameters) DeepCopyInto(out *PrincipalsAndPrincipalsParameters) {
+	*out = *in
+	if in.Any != nil {
+		in, out := &in.Any, &out.Any
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make([]PrincipalsAndPrincipalsHeaderParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteIP != nil {
+		in, out := &in.RemoteIP, &out.RemoteIP
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsAndPrincipalsParameters.
+func (in *PrincipalsAndPrincipalsParameters) DeepCopy() *PrincipalsAndPrincipalsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsAndPrincipalsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsInitParameters) DeepCopyInto(out *PrincipalsInitParameters) {
+	*out = *in
+	if in.AndPrincipals != nil {
+		in, out := &in.AndPrincipals, &out.AndPrincipals
+		*out = make([]AndPrincipalsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsInitParameters.
+func (in *PrincipalsInitParameters) DeepCopy() *PrincipalsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsObservation) DeepCopyInto(out *PrincipalsObservation) {
+	*out = *in
+	if in.AndPrincipals != nil {
+		in, out := &in.AndPrincipals, &out.AndPrincipals
+		*out = make([]AndPrincipalsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsObservation.
+func (in *PrincipalsObservation) DeepCopy() *PrincipalsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrincipalsParameters) DeepCopyInto(out *PrincipalsParameters) {
+	*out = *in
+	if in.AndPrincipals != nil {
+		in, out := &in.AndPrincipals, &out.AndPrincipals
+		*out = make([]AndPrincipalsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalsParameters.
+func (in *PrincipalsParameters) DeepCopy() *PrincipalsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrincipalsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacInitParameters) DeepCopyInto(out *RbacInitParameters) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Principals != nil {
+		in, out := &in.Principals, &out.Principals
+		*out = make([]PrincipalsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacInitParameters.
+func (in *RbacInitParameters) DeepCopy() *RbacInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacObservation) DeepCopyInto(out *RbacObservation) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Principals != nil {
+		in, out := &in.Principals, &out.Principals
+		*out = make([]PrincipalsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacObservation.
+func (in *RbacObservation) DeepCopy() *RbacObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacParameters) DeepCopyInto(out *RbacParameters) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Principals != nil {
+		in, out := &in.Principals, &out.Principals
+		*out = make([]PrincipalsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacParameters.
+func (in *RbacParameters) DeepCopy() *RbacParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsAndPrincipalsHeaderInitParameters) DeepCopyInto(out *RbacPrincipalsAndPrincipalsHeaderInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]AndPrincipalsHeaderValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsAndPrincipalsHeaderInitParameters.
+func (in *RbacPrincipalsAndPrincipalsHeaderInitParameters) DeepCopy() *RbacPrincipalsAndPrincipalsHeaderInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsAndPrincipalsHeaderInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsAndPrincipalsHeaderObservation) DeepCopyInto(out *RbacPrincipalsAndPrincipalsHeaderObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]AndPrincipalsHeaderValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsAndPrincipalsHeaderObservation.
+func (in *RbacPrincipalsAndPrincipalsHeaderObservation) DeepCopy() *RbacPrincipalsAndPrincipalsHeaderObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsAndPrincipalsHeaderObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsAndPrincipalsHeaderParameters) DeepCopyInto(out *RbacPrincipalsAndPrincipalsHeaderParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]AndPrincipalsHeaderValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsAndPrincipalsHeaderParameters.
+func (in *RbacPrincipalsAndPrincipalsHeaderParameters) DeepCopy() *RbacPrincipalsAndPrincipalsHeaderParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsAndPrincipalsHeaderParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsAndPrincipalsInitParameters) DeepCopyInto(out *RbacPrincipalsAndPrincipalsInitParameters) {
+	*out = *in
+	if in.Any != nil {
+		in, out := &in.Any, &out.Any
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make([]RbacPrincipalsAndPrincipalsHeaderInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteIP != nil {
+		in, out := &in.RemoteIP, &out.RemoteIP
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsAndPrincipalsInitParameters.
+func (in *RbacPrincipalsAndPrincipalsInitParameters) DeepCopy() *RbacPrincipalsAndPrincipalsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsAndPrincipalsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsAndPrincipalsObservation) DeepCopyInto(out *RbacPrincipalsAndPrincipalsObservation) {
+	*out = *in
+	if in.Any != nil {
+		in, out := &in.Any, &out.Any
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make([]RbacPrincipalsAndPrincipalsHeaderObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteIP != nil {
+		in, out := &in.RemoteIP, &out.RemoteIP
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsAndPrincipalsObservation.
+func (in *RbacPrincipalsAndPrincipalsObservation) DeepCopy() *RbacPrincipalsAndPrincipalsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsAndPrincipalsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsAndPrincipalsParameters) DeepCopyInto(out *RbacPrincipalsAndPrincipalsParameters) {
+	*out = *in
+	if in.Any != nil {
+		in, out := &in.Any, &out.Any
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make([]RbacPrincipalsAndPrincipalsHeaderParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteIP != nil {
+		in, out := &in.RemoteIP, &out.RemoteIP
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsAndPrincipalsParameters.
+func (in *RbacPrincipalsAndPrincipalsParameters) DeepCopy() *RbacPrincipalsAndPrincipalsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsAndPrincipalsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsInitParameters) DeepCopyInto(out *RbacPrincipalsInitParameters) {
+	*out = *in
+	if in.AndPrincipals != nil {
+		in, out := &in.AndPrincipals, &out.AndPrincipals
+		*out = make([]PrincipalsAndPrincipalsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsInitParameters.
+func (in *RbacPrincipalsInitParameters) DeepCopy() *RbacPrincipalsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsObservation) DeepCopyInto(out *RbacPrincipalsObservation) {
+	*out = *in
+	if in.AndPrincipals != nil {
+		in, out := &in.AndPrincipals, &out.AndPrincipals
+		*out = make([]PrincipalsAndPrincipalsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsObservation.
+func (in *RbacPrincipalsObservation) DeepCopy() *RbacPrincipalsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RbacPrincipalsParameters) DeepCopyInto(out *RbacPrincipalsParameters) {
+	*out = *in
+	if in.AndPrincipals != nil {
+		in, out := &in.AndPrincipals, &out.AndPrincipals
+		*out = make([]PrincipalsAndPrincipalsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RbacPrincipalsParameters.
+func (in *RbacPrincipalsParameters) DeepCopy() *RbacPrincipalsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RbacPrincipalsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectActionInitParameters) DeepCopyInto(out *RedirectActionInitParameters) {
+	*out = *in
+	if in.RemoveQuery != nil {
+		in, out := &in.RemoveQuery, &out.RemoveQuery
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ReplaceHost != nil {
+		in, out := &in.ReplaceHost, &out.ReplaceHost
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplacePath != nil {
+		in, out := &in.ReplacePath, &out.ReplacePath
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplacePort != nil {
+		in, out := &in.ReplacePort, &out.ReplacePort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplacePrefix != nil {
+		in, out := &in.ReplacePrefix, &out.ReplacePrefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplaceScheme != nil {
+		in, out := &in.ReplaceScheme, &out.ReplaceScheme
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResponseCode != nil {
+		in, out := &in.ResponseCode, &out.ResponseCode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectActionInitParameters.
+func (in *RedirectActionInitParameters) DeepCopy() *RedirectActionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedirectActionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectActionObservation) DeepCopyInto(out *RedirectActionObservation) {
+	*out = *in
+	if in.RemoveQuery != nil {
+		in, out := &in.RemoveQuery, &out.RemoveQuery
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ReplaceHost != nil {
+		in, out := &in.ReplaceHost, &out.ReplaceHost
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplacePath != nil {
+		in, out := &in.ReplacePath, &out.ReplacePath
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplacePort != nil {
+		in, out := &in.ReplacePort, &out.ReplacePort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplacePrefix != nil {
+		in, out := &in.ReplacePrefix, &out.ReplacePrefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplaceScheme != nil {
+		in, out := &in.ReplaceScheme, &out.ReplaceScheme
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResponseCode != nil {
+		in, out := &in.ResponseCode, &out.ResponseCode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectActionObservation.
+func (in *RedirectActionObservation) DeepCopy() *RedirectActionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedirectActionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectActionParameters) DeepCopyInto(out *RedirectActionParameters) { + *out = *in + if in.RemoveQuery != nil { + in, out := &in.RemoveQuery, &out.RemoveQuery + *out = new(bool) + **out = **in + } + if in.ReplaceHost != nil { + in, out := &in.ReplaceHost, &out.ReplaceHost + *out = new(string) + **out = **in + } + if in.ReplacePath != nil { + in, out := &in.ReplacePath, &out.ReplacePath + *out = new(string) + **out = **in + } + if in.ReplacePort != nil { + in, out := &in.ReplacePort, &out.ReplacePort + *out = new(float64) + **out = **in + } + if in.ReplacePrefix != nil { + in, out := &in.ReplacePrefix, &out.ReplacePrefix + *out = new(string) + **out = **in + } + if in.ReplaceScheme != nil { + in, out := &in.ReplaceScheme, &out.ReplaceScheme + *out = new(string) + **out = **in + } + if in.ResponseCode != nil { + in, out := &in.ResponseCode, &out.ResponseCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectActionParameters. +func (in *RedirectActionParameters) DeepCopy() *RedirectActionParameters { + if in == nil { + return nil + } + out := new(RedirectActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectsInitParameters) DeepCopyInto(out *RedirectsInitParameters) { + *out = *in + if in.HTTPToHTTPS != nil { + in, out := &in.HTTPToHTTPS, &out.HTTPToHTTPS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectsInitParameters. +func (in *RedirectsInitParameters) DeepCopy() *RedirectsInitParameters { + if in == nil { + return nil + } + out := new(RedirectsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectsObservation) DeepCopyInto(out *RedirectsObservation) { + *out = *in + if in.HTTPToHTTPS != nil { + in, out := &in.HTTPToHTTPS, &out.HTTPToHTTPS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectsObservation. +func (in *RedirectsObservation) DeepCopy() *RedirectsObservation { + if in == nil { + return nil + } + out := new(RedirectsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectsParameters) DeepCopyInto(out *RedirectsParameters) { + *out = *in + if in.HTTPToHTTPS != nil { + in, out := &in.HTTPToHTTPS, &out.HTTPToHTTPS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectsParameters. +func (in *RedirectsParameters) DeepCopy() *RedirectsParameters { + if in == nil { + return nil + } + out := new(RedirectsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteInitParameters) DeepCopyInto(out *RouteInitParameters) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = make([]GRPCRouteInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = make([]HTTPRouteInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RouteOptions != nil { + in, out := &in.RouteOptions, &out.RouteOptions + *out = make([]RouteRouteOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteInitParameters. +func (in *RouteInitParameters) DeepCopy() *RouteInitParameters { + if in == nil { + return nil + } + out := new(RouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteObservation) DeepCopyInto(out *RouteObservation) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = make([]GRPCRouteObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = make([]HTTPRouteObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RouteOptions != nil { + in, out := &in.RouteOptions, &out.RouteOptions + *out = make([]RouteRouteOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteObservation. +func (in *RouteObservation) DeepCopy() *RouteObservation { + if in == nil { + return nil + } + out := new(RouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsInitParameters) DeepCopyInto(out *RouteOptionsInitParameters) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = make([]RbacInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityProfileID != nil { + in, out := &in.SecurityProfileID, &out.SecurityProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsInitParameters. +func (in *RouteOptionsInitParameters) DeepCopy() *RouteOptionsInitParameters { + if in == nil { + return nil + } + out := new(RouteOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteOptionsObservation) DeepCopyInto(out *RouteOptionsObservation) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = make([]RbacObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityProfileID != nil { + in, out := &in.SecurityProfileID, &out.SecurityProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsObservation. +func (in *RouteOptionsObservation) DeepCopy() *RouteOptionsObservation { + if in == nil { + return nil + } + out := new(RouteOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsParameters) DeepCopyInto(out *RouteOptionsParameters) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = make([]RbacParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityProfileID != nil { + in, out := &in.SecurityProfileID, &out.SecurityProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsParameters. +func (in *RouteOptionsParameters) DeepCopy() *RouteOptionsParameters { + if in == nil { + return nil + } + out := new(RouteOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsRbacInitParameters) DeepCopyInto(out *RouteOptionsRbacInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Principals != nil { + in, out := &in.Principals, &out.Principals + *out = make([]RbacPrincipalsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsRbacInitParameters. +func (in *RouteOptionsRbacInitParameters) DeepCopy() *RouteOptionsRbacInitParameters { + if in == nil { + return nil + } + out := new(RouteOptionsRbacInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsRbacObservation) DeepCopyInto(out *RouteOptionsRbacObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Principals != nil { + in, out := &in.Principals, &out.Principals + *out = make([]RbacPrincipalsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsRbacObservation. +func (in *RouteOptionsRbacObservation) DeepCopy() *RouteOptionsRbacObservation { + if in == nil { + return nil + } + out := new(RouteOptionsRbacObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteOptionsRbacParameters) DeepCopyInto(out *RouteOptionsRbacParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Principals != nil { + in, out := &in.Principals, &out.Principals + *out = make([]RbacPrincipalsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsRbacParameters. +func (in *RouteOptionsRbacParameters) DeepCopy() *RouteOptionsRbacParameters { + if in == nil { + return nil + } + out := new(RouteOptionsRbacParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsRbacPrincipalsInitParameters) DeepCopyInto(out *RouteOptionsRbacPrincipalsInitParameters) { + *out = *in + if in.AndPrincipals != nil { + in, out := &in.AndPrincipals, &out.AndPrincipals + *out = make([]RbacPrincipalsAndPrincipalsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsRbacPrincipalsInitParameters. +func (in *RouteOptionsRbacPrincipalsInitParameters) DeepCopy() *RouteOptionsRbacPrincipalsInitParameters { + if in == nil { + return nil + } + out := new(RouteOptionsRbacPrincipalsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsRbacPrincipalsObservation) DeepCopyInto(out *RouteOptionsRbacPrincipalsObservation) { + *out = *in + if in.AndPrincipals != nil { + in, out := &in.AndPrincipals, &out.AndPrincipals + *out = make([]RbacPrincipalsAndPrincipalsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsRbacPrincipalsObservation. +func (in *RouteOptionsRbacPrincipalsObservation) DeepCopy() *RouteOptionsRbacPrincipalsObservation { + if in == nil { + return nil + } + out := new(RouteOptionsRbacPrincipalsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteOptionsRbacPrincipalsParameters) DeepCopyInto(out *RouteOptionsRbacPrincipalsParameters) { + *out = *in + if in.AndPrincipals != nil { + in, out := &in.AndPrincipals, &out.AndPrincipals + *out = make([]RbacPrincipalsAndPrincipalsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteOptionsRbacPrincipalsParameters. +func (in *RouteOptionsRbacPrincipalsParameters) DeepCopy() *RouteOptionsRbacPrincipalsParameters { + if in == nil { + return nil + } + out := new(RouteOptionsRbacPrincipalsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteParameters) DeepCopyInto(out *RouteParameters) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = make([]GRPCRouteParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = make([]HTTPRouteParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RouteOptions != nil { + in, out := &in.RouteOptions, &out.RouteOptions + *out = make([]RouteRouteOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParameters. +func (in *RouteParameters) DeepCopy() *RouteParameters { + if in == nil { + return nil + } + out := new(RouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteRouteOptionsInitParameters) DeepCopyInto(out *RouteRouteOptionsInitParameters) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = make([]RouteOptionsRbacInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityProfileID != nil { + in, out := &in.SecurityProfileID, &out.SecurityProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteRouteOptionsInitParameters. +func (in *RouteRouteOptionsInitParameters) DeepCopy() *RouteRouteOptionsInitParameters { + if in == nil { + return nil + } + out := new(RouteRouteOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteRouteOptionsObservation) DeepCopyInto(out *RouteRouteOptionsObservation) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = make([]RouteOptionsRbacObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityProfileID != nil { + in, out := &in.SecurityProfileID, &out.SecurityProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteRouteOptionsObservation. +func (in *RouteRouteOptionsObservation) DeepCopy() *RouteRouteOptionsObservation { + if in == nil { + return nil + } + out := new(RouteRouteOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteRouteOptionsParameters) DeepCopyInto(out *RouteRouteOptionsParameters) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = make([]RouteOptionsRbacParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityProfileID != nil { + in, out := &in.SecurityProfileID, &out.SecurityProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteRouteOptionsParameters. 
+func (in *RouteRouteOptionsParameters) DeepCopy() *RouteRouteOptionsParameters { + if in == nil { + return nil + } + out := new(RouteRouteOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityInitParameters) DeepCopyInto(out *SessionAffinityInitParameters) { + *out = *in + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]ConnectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cookie != nil { + in, out := &in.Cookie, &out.Cookie + *out = make([]CookieInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityInitParameters. +func (in *SessionAffinityInitParameters) DeepCopy() *SessionAffinityInitParameters { + if in == nil { + return nil + } + out := new(SessionAffinityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityObservation) DeepCopyInto(out *SessionAffinityObservation) { + *out = *in + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]ConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cookie != nil { + in, out := &in.Cookie, &out.Cookie + *out = make([]CookieObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityObservation. +func (in *SessionAffinityObservation) DeepCopy() *SessionAffinityObservation { + if in == nil { + return nil + } + out := new(SessionAffinityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityParameters) DeepCopyInto(out *SessionAffinityParameters) { + *out = *in + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]ConnectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cookie != nil { + in, out := &in.Cookie, &out.Cookie + *out = make([]CookieParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityParameters. +func (in *SessionAffinityParameters) DeepCopy() *SessionAffinityParameters { + if in == nil { + return nil + } + out := new(SessionAffinityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SniHandlerHandlerInitParameters) DeepCopyInto(out *SniHandlerHandlerInitParameters) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPHandler != nil { + in, out := &in.HTTPHandler, &out.HTTPHandler + *out = make([]HandlerHTTPHandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamHandler != nil { + in, out := &in.StreamHandler, &out.StreamHandler + *out = make([]HandlerStreamHandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SniHandlerHandlerInitParameters. +func (in *SniHandlerHandlerInitParameters) DeepCopy() *SniHandlerHandlerInitParameters { + if in == nil { + return nil + } + out := new(SniHandlerHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SniHandlerHandlerObservation) DeepCopyInto(out *SniHandlerHandlerObservation) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPHandler != nil { + in, out := &in.HTTPHandler, &out.HTTPHandler + *out = make([]HandlerHTTPHandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamHandler != nil { + in, out := &in.StreamHandler, &out.StreamHandler + *out = make([]HandlerStreamHandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SniHandlerHandlerObservation. +func (in *SniHandlerHandlerObservation) DeepCopy() *SniHandlerHandlerObservation { + if in == nil { + return nil + } + out := new(SniHandlerHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SniHandlerHandlerParameters) DeepCopyInto(out *SniHandlerHandlerParameters) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPHandler != nil { + in, out := &in.HTTPHandler, &out.HTTPHandler + *out = make([]HandlerHTTPHandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamHandler != nil { + in, out := &in.StreamHandler, &out.StreamHandler + *out = make([]HandlerStreamHandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SniHandlerHandlerParameters. 
+func (in *SniHandlerHandlerParameters) DeepCopy() *SniHandlerHandlerParameters { + if in == nil { + return nil + } + out := new(SniHandlerHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SniHandlerInitParameters) DeepCopyInto(out *SniHandlerInitParameters) { + *out = *in + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make([]SniHandlerHandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServerNames != nil { + in, out := &in.ServerNames, &out.ServerNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SniHandlerInitParameters. +func (in *SniHandlerInitParameters) DeepCopy() *SniHandlerInitParameters { + if in == nil { + return nil + } + out := new(SniHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SniHandlerObservation) DeepCopyInto(out *SniHandlerObservation) { + *out = *in + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make([]SniHandlerHandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServerNames != nil { + in, out := &in.ServerNames, &out.ServerNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SniHandlerObservation. +func (in *SniHandlerObservation) DeepCopy() *SniHandlerObservation { + if in == nil { + return nil + } + out := new(SniHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SniHandlerParameters) DeepCopyInto(out *SniHandlerParameters) { + *out = *in + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make([]SniHandlerHandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServerNames != nil { + in, out := &in.ServerNames, &out.ServerNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SniHandlerParameters. +func (in *SniHandlerParameters) DeepCopy() *SniHandlerParameters { + if in == nil { + return nil + } + out := new(SniHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendHealthcheckGRPCHealthcheckInitParameters) DeepCopyInto(out *StreamBackendHealthcheckGRPCHealthcheckInitParameters) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckGRPCHealthcheckInitParameters. +func (in *StreamBackendHealthcheckGRPCHealthcheckInitParameters) DeepCopy() *StreamBackendHealthcheckGRPCHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckGRPCHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckGRPCHealthcheckObservation) DeepCopyInto(out *StreamBackendHealthcheckGRPCHealthcheckObservation) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckGRPCHealthcheckObservation. +func (in *StreamBackendHealthcheckGRPCHealthcheckObservation) DeepCopy() *StreamBackendHealthcheckGRPCHealthcheckObservation { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckGRPCHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckGRPCHealthcheckParameters) DeepCopyInto(out *StreamBackendHealthcheckGRPCHealthcheckParameters) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckGRPCHealthcheckParameters. +func (in *StreamBackendHealthcheckGRPCHealthcheckParameters) DeepCopy() *StreamBackendHealthcheckGRPCHealthcheckParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckGRPCHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckHTTPHealthcheckInitParameters) DeepCopyInto(out *StreamBackendHealthcheckHTTPHealthcheckInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckHTTPHealthcheckInitParameters. +func (in *StreamBackendHealthcheckHTTPHealthcheckInitParameters) DeepCopy() *StreamBackendHealthcheckHTTPHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckHTTPHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendHealthcheckHTTPHealthcheckObservation) DeepCopyInto(out *StreamBackendHealthcheckHTTPHealthcheckObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckHTTPHealthcheckObservation. +func (in *StreamBackendHealthcheckHTTPHealthcheckObservation) DeepCopy() *StreamBackendHealthcheckHTTPHealthcheckObservation { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckHTTPHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckHTTPHealthcheckParameters) DeepCopyInto(out *StreamBackendHealthcheckHTTPHealthcheckParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckHTTPHealthcheckParameters. +func (in *StreamBackendHealthcheckHTTPHealthcheckParameters) DeepCopy() *StreamBackendHealthcheckHTTPHealthcheckParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckHTTPHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendHealthcheckInitParameters) DeepCopyInto(out *StreamBackendHealthcheckInitParameters) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]StreamBackendHealthcheckGRPCHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]StreamBackendHealthcheckHTTPHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]StreamBackendHealthcheckStreamHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckInitParameters. +func (in *StreamBackendHealthcheckInitParameters) DeepCopy() *StreamBackendHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendHealthcheckObservation) DeepCopyInto(out *StreamBackendHealthcheckObservation) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]StreamBackendHealthcheckGRPCHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]StreamBackendHealthcheckHTTPHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]StreamBackendHealthcheckStreamHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckObservation. +func (in *StreamBackendHealthcheckObservation) DeepCopy() *StreamBackendHealthcheckObservation { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendHealthcheckParameters) DeepCopyInto(out *StreamBackendHealthcheckParameters) { + *out = *in + if in.GRPCHealthcheck != nil { + in, out := &in.GRPCHealthcheck, &out.GRPCHealthcheck + *out = make([]StreamBackendHealthcheckGRPCHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPHealthcheck != nil { + in, out := &in.HTTPHealthcheck, &out.HTTPHealthcheck + *out = make([]StreamBackendHealthcheckHTTPHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthcheckPort != nil { + in, out := &in.HealthcheckPort, &out.HealthcheckPort + *out = new(float64) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.IntervalJitterPercent != nil { + in, out := &in.IntervalJitterPercent, &out.IntervalJitterPercent + *out = new(float64) + **out = **in + } + if in.StreamHealthcheck != nil { + in, out := &in.StreamHealthcheck, &out.StreamHealthcheck + *out = make([]StreamBackendHealthcheckStreamHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckParameters. +func (in *StreamBackendHealthcheckParameters) DeepCopy() *StreamBackendHealthcheckParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckStreamHealthcheckInitParameters) DeepCopyInto(out *StreamBackendHealthcheckStreamHealthcheckInitParameters) { + *out = *in + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(string) + **out = **in + } + if in.Send != nil { + in, out := &in.Send, &out.Send + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckStreamHealthcheckInitParameters. +func (in *StreamBackendHealthcheckStreamHealthcheckInitParameters) DeepCopy() *StreamBackendHealthcheckStreamHealthcheckInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckStreamHealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckStreamHealthcheckObservation) DeepCopyInto(out *StreamBackendHealthcheckStreamHealthcheckObservation) { + *out = *in + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(string) + **out = **in + } + if in.Send != nil { + in, out := &in.Send, &out.Send + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckStreamHealthcheckObservation. 
+func (in *StreamBackendHealthcheckStreamHealthcheckObservation) DeepCopy() *StreamBackendHealthcheckStreamHealthcheckObservation { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckStreamHealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendHealthcheckStreamHealthcheckParameters) DeepCopyInto(out *StreamBackendHealthcheckStreamHealthcheckParameters) { + *out = *in + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(string) + **out = **in + } + if in.Send != nil { + in, out := &in.Send, &out.Send + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendHealthcheckStreamHealthcheckParameters. +func (in *StreamBackendHealthcheckStreamHealthcheckParameters) DeepCopy() *StreamBackendHealthcheckStreamHealthcheckParameters { + if in == nil { + return nil + } + out := new(StreamBackendHealthcheckStreamHealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendInitParameters) DeepCopyInto(out *StreamBackendInitParameters) { + *out = *in + if in.EnableProxyProtocol != nil { + in, out := &in.EnableProxyProtocol, &out.EnableProxyProtocol + *out = new(bool) + **out = **in + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]StreamBackendHealthcheckInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]StreamBackendLoadBalancingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]StreamBackendTLSInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetGroupIdsRefs != nil { + in, out := &in.TargetGroupIdsRefs, &out.TargetGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIdsSelector != nil { + in, out := &in.TargetGroupIdsSelector, &out.TargetGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendInitParameters. +func (in *StreamBackendInitParameters) DeepCopy() *StreamBackendInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendLoadBalancingConfigInitParameters) DeepCopyInto(out *StreamBackendLoadBalancingConfigInitParameters) { + *out = *in + if in.LocalityAwareRoutingPercent != nil { + in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PanicThreshold != nil { + in, out := &in.PanicThreshold, &out.PanicThreshold + *out = new(float64) + **out = **in + } + if in.StrictLocality != nil { + in, out := &in.StrictLocality, &out.StrictLocality + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendLoadBalancingConfigInitParameters. +func (in *StreamBackendLoadBalancingConfigInitParameters) DeepCopy() *StreamBackendLoadBalancingConfigInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendLoadBalancingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendLoadBalancingConfigObservation) DeepCopyInto(out *StreamBackendLoadBalancingConfigObservation) { + *out = *in + if in.LocalityAwareRoutingPercent != nil { + in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PanicThreshold != nil { + in, out := &in.PanicThreshold, &out.PanicThreshold + *out = new(float64) + **out = **in + } + if in.StrictLocality != nil { + in, out := &in.StrictLocality, &out.StrictLocality + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendLoadBalancingConfigObservation. +func (in *StreamBackendLoadBalancingConfigObservation) DeepCopy() *StreamBackendLoadBalancingConfigObservation { + if in == nil { + return nil + } + out := new(StreamBackendLoadBalancingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendLoadBalancingConfigParameters) DeepCopyInto(out *StreamBackendLoadBalancingConfigParameters) { + *out = *in + if in.LocalityAwareRoutingPercent != nil { + in, out := &in.LocalityAwareRoutingPercent, &out.LocalityAwareRoutingPercent + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PanicThreshold != nil { + in, out := &in.PanicThreshold, &out.PanicThreshold + *out = new(float64) + **out = **in + } + if in.StrictLocality != nil { + in, out := &in.StrictLocality, &out.StrictLocality + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendLoadBalancingConfigParameters. +func (in *StreamBackendLoadBalancingConfigParameters) DeepCopy() *StreamBackendLoadBalancingConfigParameters { + if in == nil { + return nil + } + out := new(StreamBackendLoadBalancingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendObservation) DeepCopyInto(out *StreamBackendObservation) { + *out = *in + if in.EnableProxyProtocol != nil { + in, out := &in.EnableProxyProtocol, &out.EnableProxyProtocol + *out = new(bool) + **out = **in + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]StreamBackendHealthcheckObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]StreamBackendLoadBalancingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]StreamBackendTLSObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendObservation. +func (in *StreamBackendObservation) DeepCopy() *StreamBackendObservation { + if in == nil { + return nil + } + out := new(StreamBackendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendParameters) DeepCopyInto(out *StreamBackendParameters) { + *out = *in + if in.EnableProxyProtocol != nil { + in, out := &in.EnableProxyProtocol, &out.EnableProxyProtocol + *out = new(bool) + **out = **in + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = make([]StreamBackendHealthcheckParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancingConfig != nil { + in, out := &in.LoadBalancingConfig, &out.LoadBalancingConfig + *out = make([]StreamBackendLoadBalancingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]StreamBackendTLSParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIds != nil { + in, out := &in.TargetGroupIds, &out.TargetGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetGroupIdsRefs != nil { + in, out := &in.TargetGroupIdsRefs, &out.TargetGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupIdsSelector != nil { + in, out := &in.TargetGroupIdsSelector, &out.TargetGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendParameters. +func (in *StreamBackendParameters) DeepCopy() *StreamBackendParameters { + if in == nil { + return nil + } + out := new(StreamBackendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendTLSInitParameters) DeepCopyInto(out *StreamBackendTLSInitParameters) { + *out = *in + if in.Sni != nil { + in, out := &in.Sni, &out.Sni + *out = new(string) + **out = **in + } + if in.ValidationContext != nil { + in, out := &in.ValidationContext, &out.ValidationContext + *out = make([]StreamBackendTLSValidationContextInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendTLSInitParameters. +func (in *StreamBackendTLSInitParameters) DeepCopy() *StreamBackendTLSInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendTLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendTLSObservation) DeepCopyInto(out *StreamBackendTLSObservation) { + *out = *in + if in.Sni != nil { + in, out := &in.Sni, &out.Sni + *out = new(string) + **out = **in + } + if in.ValidationContext != nil { + in, out := &in.ValidationContext, &out.ValidationContext + *out = make([]StreamBackendTLSValidationContextObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendTLSObservation. +func (in *StreamBackendTLSObservation) DeepCopy() *StreamBackendTLSObservation { + if in == nil { + return nil + } + out := new(StreamBackendTLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendTLSParameters) DeepCopyInto(out *StreamBackendTLSParameters) { + *out = *in + if in.Sni != nil { + in, out := &in.Sni, &out.Sni + *out = new(string) + **out = **in + } + if in.ValidationContext != nil { + in, out := &in.ValidationContext, &out.ValidationContext + *out = make([]StreamBackendTLSValidationContextParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendTLSParameters. +func (in *StreamBackendTLSParameters) DeepCopy() *StreamBackendTLSParameters { + if in == nil { + return nil + } + out := new(StreamBackendTLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendTLSValidationContextInitParameters) DeepCopyInto(out *StreamBackendTLSValidationContextInitParameters) { + *out = *in + if in.TrustedCABytes != nil { + in, out := &in.TrustedCABytes, &out.TrustedCABytes + *out = new(string) + **out = **in + } + if in.TrustedCAID != nil { + in, out := &in.TrustedCAID, &out.TrustedCAID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendTLSValidationContextInitParameters. +func (in *StreamBackendTLSValidationContextInitParameters) DeepCopy() *StreamBackendTLSValidationContextInitParameters { + if in == nil { + return nil + } + out := new(StreamBackendTLSValidationContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamBackendTLSValidationContextObservation) DeepCopyInto(out *StreamBackendTLSValidationContextObservation) { + *out = *in + if in.TrustedCABytes != nil { + in, out := &in.TrustedCABytes, &out.TrustedCABytes + *out = new(string) + **out = **in + } + if in.TrustedCAID != nil { + in, out := &in.TrustedCAID, &out.TrustedCAID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendTLSValidationContextObservation. +func (in *StreamBackendTLSValidationContextObservation) DeepCopy() *StreamBackendTLSValidationContextObservation { + if in == nil { + return nil + } + out := new(StreamBackendTLSValidationContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamBackendTLSValidationContextParameters) DeepCopyInto(out *StreamBackendTLSValidationContextParameters) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamBackendTLSValidationContextParameters.
+func (in *StreamBackendTLSValidationContextParameters) DeepCopy() *StreamBackendTLSValidationContextParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamBackendTLSValidationContextParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamHandlerInitParameters) DeepCopyInto(out *StreamHandlerInitParameters) {
+	*out = *in
+	if in.BackendGroupID != nil {
+		in, out := &in.BackendGroupID, &out.BackendGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamHandlerInitParameters.
+func (in *StreamHandlerInitParameters) DeepCopy() *StreamHandlerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamHandlerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamHandlerObservation) DeepCopyInto(out *StreamHandlerObservation) {
+	*out = *in
+	if in.BackendGroupID != nil {
+		in, out := &in.BackendGroupID, &out.BackendGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamHandlerObservation.
+func (in *StreamHandlerObservation) DeepCopy() *StreamHandlerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamHandlerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamHandlerParameters) DeepCopyInto(out *StreamHandlerParameters) {
+	*out = *in
+	if in.BackendGroupID != nil {
+		in, out := &in.BackendGroupID, &out.BackendGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamHandlerParameters.
+func (in *StreamHandlerParameters) DeepCopy() *StreamHandlerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamHandlerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamHealthcheckInitParameters) DeepCopyInto(out *StreamHealthcheckInitParameters) {
+	*out = *in
+	if in.Receive != nil {
+		in, out := &in.Receive, &out.Receive
+		*out = new(string)
+		**out = **in
+	}
+	if in.Send != nil {
+		in, out := &in.Send, &out.Send
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamHealthcheckInitParameters.
+func (in *StreamHealthcheckInitParameters) DeepCopy() *StreamHealthcheckInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamHealthcheckInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamHealthcheckObservation) DeepCopyInto(out *StreamHealthcheckObservation) {
+	*out = *in
+	if in.Receive != nil {
+		in, out := &in.Receive, &out.Receive
+		*out = new(string)
+		**out = **in
+	}
+	if in.Send != nil {
+		in, out := &in.Send, &out.Send
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamHealthcheckObservation.
+func (in *StreamHealthcheckObservation) DeepCopy() *StreamHealthcheckObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamHealthcheckObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamHealthcheckParameters) DeepCopyInto(out *StreamHealthcheckParameters) {
+	*out = *in
+	if in.Receive != nil {
+		in, out := &in.Receive, &out.Receive
+		*out = new(string)
+		**out = **in
+	}
+	if in.Send != nil {
+		in, out := &in.Send, &out.Send
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamHealthcheckParameters.
+func (in *StreamHealthcheckParameters) DeepCopy() *StreamHealthcheckParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamHealthcheckParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamInitParameters) DeepCopyInto(out *StreamInitParameters) {
+	*out = *in
+	if in.Handler != nil {
+		in, out := &in.Handler, &out.Handler
+		*out = make([]StreamHandlerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInitParameters.
+func (in *StreamInitParameters) DeepCopy() *StreamInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamObservation) DeepCopyInto(out *StreamObservation) {
+	*out = *in
+	if in.Handler != nil {
+		in, out := &in.Handler, &out.Handler
+		*out = make([]StreamHandlerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservation.
+func (in *StreamObservation) DeepCopy() *StreamObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamParameters) DeepCopyInto(out *StreamParameters) {
+	*out = *in
+	if in.Handler != nil {
+		in, out := &in.Handler, &out.Handler
+		*out = make([]StreamHandlerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamParameters.
+func (in *StreamParameters) DeepCopy() *StreamParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSInitParameters) DeepCopyInto(out *TLSInitParameters) {
+	*out = *in
+	if in.Sni != nil {
+		in, out := &in.Sni, &out.Sni
+		*out = new(string)
+		**out = **in
+	}
+	if in.ValidationContext != nil {
+		in, out := &in.ValidationContext, &out.ValidationContext
+		*out = make([]ValidationContextInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSInitParameters.
+func (in *TLSInitParameters) DeepCopy() *TLSInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSObservation) DeepCopyInto(out *TLSObservation) {
+	*out = *in
+	if in.Sni != nil {
+		in, out := &in.Sni, &out.Sni
+		*out = new(string)
+		**out = **in
+	}
+	if in.ValidationContext != nil {
+		in, out := &in.ValidationContext, &out.ValidationContext
+		*out = make([]ValidationContextObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSObservation.
+func (in *TLSObservation) DeepCopy() *TLSObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSParameters) DeepCopyInto(out *TLSParameters) {
+	*out = *in
+	if in.Sni != nil {
+		in, out := &in.Sni, &out.Sni
+		*out = new(string)
+		**out = **in
+	}
+	if in.ValidationContext != nil {
+		in, out := &in.ValidationContext, &out.ValidationContext
+		*out = make([]ValidationContextParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters.
+func (in *TLSParameters) DeepCopy() *TLSParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSValidationContextInitParameters) DeepCopyInto(out *TLSValidationContextInitParameters) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationContextInitParameters.
+func (in *TLSValidationContextInitParameters) DeepCopy() *TLSValidationContextInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSValidationContextInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSValidationContextObservation) DeepCopyInto(out *TLSValidationContextObservation) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationContextObservation.
+func (in *TLSValidationContextObservation) DeepCopy() *TLSValidationContextObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSValidationContextObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSValidationContextParameters) DeepCopyInto(out *TLSValidationContextParameters) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationContextParameters.
+func (in *TLSValidationContextParameters) DeepCopy() *TLSValidationContextParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSValidationContextParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroup) DeepCopyInto(out *TargetGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroup.
+func (in *TargetGroup) DeepCopy() *TargetGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TargetGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupInitParameters) DeepCopyInto(out *TargetGroupInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Target != nil {
+		in, out := &in.Target, &out.Target
+		*out = make([]TargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInitParameters.
+func (in *TargetGroupInitParameters) DeepCopy() *TargetGroupInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroupInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupList) DeepCopyInto(out *TargetGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]TargetGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupList.
+func (in *TargetGroupList) DeepCopy() *TargetGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TargetGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupObservation) DeepCopyInto(out *TargetGroupObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Target != nil {
+		in, out := &in.Target, &out.Target
+		*out = make([]TargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupObservation.
+func (in *TargetGroupObservation) DeepCopy() *TargetGroupObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroupObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupParameters) DeepCopyInto(out *TargetGroupParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Target != nil {
+		in, out := &in.Target, &out.Target
+		*out = make([]TargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupParameters.
+func (in *TargetGroupParameters) DeepCopy() *TargetGroupParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroupParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupSpec) DeepCopyInto(out *TargetGroupSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupSpec.
+func (in *TargetGroupSpec) DeepCopy() *TargetGroupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupStatus) DeepCopyInto(out *TargetGroupStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupStatus.
+func (in *TargetGroupStatus) DeepCopy() *TargetGroupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetGroupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) {
+	*out = *in
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrivateIPv4Address != nil {
+		in, out := &in.PrivateIPv4Address, &out.PrivateIPv4Address
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters.
+func (in *TargetInitParameters) DeepCopy() *TargetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetObservation) DeepCopyInto(out *TargetObservation) {
+	*out = *in
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrivateIPv4Address != nil {
+		in, out := &in.PrivateIPv4Address, &out.PrivateIPv4Address
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation.
+func (in *TargetObservation) DeepCopy() *TargetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetParameters) DeepCopyInto(out *TargetParameters) {
+	*out = *in
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrivateIPv4Address != nil {
+		in, out := &in.PrivateIPv4Address, &out.PrivateIPv4Address
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters.
+func (in *TargetParameters) DeepCopy() *TargetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidationContextInitParameters) DeepCopyInto(out *ValidationContextInitParameters) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationContextInitParameters.
+func (in *ValidationContextInitParameters) DeepCopy() *ValidationContextInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationContextInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidationContextObservation) DeepCopyInto(out *ValidationContextObservation) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationContextObservation.
+func (in *ValidationContextObservation) DeepCopy() *ValidationContextObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationContextObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidationContextParameters) DeepCopyInto(out *ValidationContextParameters) {
+	*out = *in
+	if in.TrustedCABytes != nil {
+		in, out := &in.TrustedCABytes, &out.TrustedCABytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.TrustedCAID != nil {
+		in, out := &in.TrustedCAID, &out.TrustedCAID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationContextParameters.
+func (in *ValidationContextParameters) DeepCopy() *ValidationContextParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationContextParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueInitParameters) DeepCopyInto(out *ValueInitParameters) {
+	*out = *in
+	if in.Exact != nil {
+		in, out := &in.Exact, &out.Exact
+		*out = new(string)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regex != nil {
+		in, out := &in.Regex, &out.Regex
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueInitParameters.
+func (in *ValueInitParameters) DeepCopy() *ValueInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueObservation) DeepCopyInto(out *ValueObservation) {
+	*out = *in
+	if in.Exact != nil {
+		in, out := &in.Exact, &out.Exact
+		*out = new(string)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regex != nil {
+		in, out := &in.Regex, &out.Regex
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueObservation.
+func (in *ValueObservation) DeepCopy() *ValueObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueParameters) DeepCopyInto(out *ValueParameters) {
+	*out = *in
+	if in.Exact != nil {
+		in, out := &in.Exact, &out.Exact
+		*out = new(string)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regex != nil {
+		in, out := &in.Regex, &out.Regex
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueParameters.
+func (in *ValueParameters) DeepCopy() *ValueParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHost) DeepCopyInto(out *VirtualHost) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHost.
+func (in *VirtualHost) DeepCopy() *VirtualHost {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHost)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VirtualHost) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostInitParameters) DeepCopyInto(out *VirtualHostInitParameters) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.HTTPRouterID != nil {
+		in, out := &in.HTTPRouterID, &out.HTTPRouterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.HTTPRouterIDRef != nil {
+		in, out := &in.HTTPRouterIDRef, &out.HTTPRouterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPRouterIDSelector != nil {
+		in, out := &in.HTTPRouterIDSelector, &out.HTTPRouterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ModifyRequestHeaders != nil {
+		in, out := &in.ModifyRequestHeaders, &out.ModifyRequestHeaders
+		*out = make([]ModifyRequestHeadersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ModifyResponseHeaders != nil {
+		in, out := &in.ModifyResponseHeaders, &out.ModifyResponseHeaders
+		*out = make([]ModifyResponseHeadersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Route != nil {
+		in, out := &in.Route, &out.Route
+		*out = make([]RouteInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RouteOptions != nil {
+		in, out := &in.RouteOptions, &out.RouteOptions
+		*out = make([]VirtualHostRouteOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostInitParameters.
+func (in *VirtualHostInitParameters) DeepCopy() *VirtualHostInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostList) DeepCopyInto(out *VirtualHostList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]VirtualHost, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostList.
+func (in *VirtualHostList) DeepCopy() *VirtualHostList {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VirtualHostList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostObservation) DeepCopyInto(out *VirtualHostObservation) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.HTTPRouterID != nil {
+		in, out := &in.HTTPRouterID, &out.HTTPRouterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ModifyRequestHeaders != nil {
+		in, out := &in.ModifyRequestHeaders, &out.ModifyRequestHeaders
+		*out = make([]ModifyRequestHeadersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ModifyResponseHeaders != nil {
+		in, out := &in.ModifyResponseHeaders, &out.ModifyResponseHeaders
+		*out = make([]ModifyResponseHeadersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Route != nil {
+		in, out := &in.Route, &out.Route
+		*out = make([]RouteObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RouteOptions != nil {
+		in, out := &in.RouteOptions, &out.RouteOptions
+		*out = make([]VirtualHostRouteOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostObservation.
+func (in *VirtualHostObservation) DeepCopy() *VirtualHostObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostParameters) DeepCopyInto(out *VirtualHostParameters) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.HTTPRouterID != nil {
+		in, out := &in.HTTPRouterID, &out.HTTPRouterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.HTTPRouterIDRef != nil {
+		in, out := &in.HTTPRouterIDRef, &out.HTTPRouterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPRouterIDSelector != nil {
+		in, out := &in.HTTPRouterIDSelector, &out.HTTPRouterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ModifyRequestHeaders != nil {
+		in, out := &in.ModifyRequestHeaders, &out.ModifyRequestHeaders
+		*out = make([]ModifyRequestHeadersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ModifyResponseHeaders != nil {
+		in, out := &in.ModifyResponseHeaders, &out.ModifyResponseHeaders
+		*out = make([]ModifyResponseHeadersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Route != nil {
+		in, out := &in.Route, &out.Route
+		*out = make([]RouteParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RouteOptions != nil {
+		in, out := &in.RouteOptions, &out.RouteOptions
+		*out = make([]VirtualHostRouteOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostParameters.
+func (in *VirtualHostParameters) DeepCopy() *VirtualHostParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostRouteOptionsInitParameters) DeepCopyInto(out *VirtualHostRouteOptionsInitParameters) {
+	*out = *in
+	if in.Rbac != nil {
+		in, out := &in.Rbac, &out.Rbac
+		*out = make([]VirtualHostRouteOptionsRbacInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityProfileID != nil {
+		in, out := &in.SecurityProfileID, &out.SecurityProfileID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostRouteOptionsInitParameters.
+func (in *VirtualHostRouteOptionsInitParameters) DeepCopy() *VirtualHostRouteOptionsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostRouteOptionsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostRouteOptionsObservation) DeepCopyInto(out *VirtualHostRouteOptionsObservation) {
+	*out = *in
+	if in.Rbac != nil {
+		in, out := &in.Rbac, &out.Rbac
+		*out = make([]VirtualHostRouteOptionsRbacObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityProfileID != nil {
+		in, out := &in.SecurityProfileID, &out.SecurityProfileID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostRouteOptionsObservation.
+func (in *VirtualHostRouteOptionsObservation) DeepCopy() *VirtualHostRouteOptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostRouteOptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostRouteOptionsParameters) DeepCopyInto(out *VirtualHostRouteOptionsParameters) {
+	*out = *in
+	if in.Rbac != nil {
+		in, out := &in.Rbac, &out.Rbac
+		*out = make([]VirtualHostRouteOptionsRbacParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityProfileID != nil {
+		in, out := &in.SecurityProfileID, &out.SecurityProfileID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostRouteOptionsParameters.
+func (in *VirtualHostRouteOptionsParameters) DeepCopy() *VirtualHostRouteOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostRouteOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostRouteOptionsRbacInitParameters) DeepCopyInto(out *VirtualHostRouteOptionsRbacInitParameters) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Principals != nil {
+		in, out := &in.Principals, &out.Principals
+		*out = make([]RouteOptionsRbacPrincipalsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostRouteOptionsRbacInitParameters.
+func (in *VirtualHostRouteOptionsRbacInitParameters) DeepCopy() *VirtualHostRouteOptionsRbacInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostRouteOptionsRbacInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostRouteOptionsRbacObservation) DeepCopyInto(out *VirtualHostRouteOptionsRbacObservation) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Principals != nil {
+		in, out := &in.Principals, &out.Principals
+		*out = make([]RouteOptionsRbacPrincipalsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostRouteOptionsRbacObservation.
+func (in *VirtualHostRouteOptionsRbacObservation) DeepCopy() *VirtualHostRouteOptionsRbacObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostRouteOptionsRbacObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostRouteOptionsRbacParameters) DeepCopyInto(out *VirtualHostRouteOptionsRbacParameters) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Principals != nil {
+		in, out := &in.Principals, &out.Principals
+		*out = make([]RouteOptionsRbacPrincipalsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostRouteOptionsRbacParameters.
+func (in *VirtualHostRouteOptionsRbacParameters) DeepCopy() *VirtualHostRouteOptionsRbacParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostRouteOptionsRbacParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostSpec) DeepCopyInto(out *VirtualHostSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostSpec.
+func (in *VirtualHostSpec) DeepCopy() *VirtualHostSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualHostStatus) DeepCopyInto(out *VirtualHostStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHostStatus.
+func (in *VirtualHostStatus) DeepCopy() *VirtualHostStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VirtualHostStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/apis/alb/v1alpha1/zz_generated.resolvers.go b/apis/alb/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..cba2d5d
--- /dev/null
+++ b/apis/alb/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,655 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	errors "github.com/pkg/errors"
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
+	v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this BackendGroup.
+func (mg *BackendGroup) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var mrsp reference.MultiResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.GRPCBackend); i3++ {
+		mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+			CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.GRPCBackend[i3].TargetGroupIds),
+			Extract:       reference.ExternalName(),
+			References:    mg.Spec.ForProvider.GRPCBackend[i3].TargetGroupIdsRefs,
+			Selector:      mg.Spec.ForProvider.GRPCBackend[i3].TargetGroupIdsSelector,
+			To: reference.To{
+				List:    &TargetGroupList{},
+				Managed: &TargetGroup{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.ForProvider.GRPCBackend[i3].TargetGroupIds")
+		}
+		mg.Spec.ForProvider.GRPCBackend[i3].TargetGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+		mg.Spec.ForProvider.GRPCBackend[i3].TargetGroupIdsRefs = mrsp.ResolvedReferences
+
+	}
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.HTTPBackend); i3++ {
+		mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+			CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.HTTPBackend[i3].TargetGroupIds),
+			Extract:       reference.ExternalName(),
+			References:    mg.Spec.ForProvider.HTTPBackend[i3].TargetGroupIdsRefs,
+			Selector:      mg.Spec.ForProvider.HTTPBackend[i3].TargetGroupIdsSelector,
+			To: reference.To{
+				List:    &TargetGroupList{},
+				Managed: &TargetGroup{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.ForProvider.HTTPBackend[i3].TargetGroupIds")
+		}
+		mg.Spec.ForProvider.HTTPBackend[i3].TargetGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+		mg.Spec.ForProvider.HTTPBackend[i3].TargetGroupIdsRefs = mrsp.ResolvedReferences
+
+	}
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.StreamBackend); i3++ {
+		mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+			CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.StreamBackend[i3].TargetGroupIds),
+			Extract:       reference.ExternalName(),
+			References:    mg.Spec.ForProvider.StreamBackend[i3].TargetGroupIdsRefs,
+			Selector:      mg.Spec.ForProvider.StreamBackend[i3].TargetGroupIdsSelector,
+			To: reference.To{
+				List:    &TargetGroupList{},
+				Managed: &TargetGroup{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.ForProvider.StreamBackend[i3].TargetGroupIds")
+		}
+		mg.Spec.ForProvider.StreamBackend[i3].TargetGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+		mg.Spec.ForProvider.StreamBackend[i3].TargetGroupIdsRefs = mrsp.ResolvedReferences
+
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.GRPCBackend); i3++ {
+		mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+			CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.GRPCBackend[i3].TargetGroupIds),
+			Extract:       reference.ExternalName(),
+			References:    mg.Spec.InitProvider.GRPCBackend[i3].TargetGroupIdsRefs,
+			Selector:      mg.Spec.InitProvider.GRPCBackend[i3].TargetGroupIdsSelector,
+			To: reference.To{
+				List:    &TargetGroupList{},
+				Managed: &TargetGroup{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.InitProvider.GRPCBackend[i3].TargetGroupIds")
+		}
+		mg.Spec.InitProvider.GRPCBackend[i3].TargetGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+		mg.Spec.InitProvider.GRPCBackend[i3].TargetGroupIdsRefs = mrsp.ResolvedReferences
+
+	}
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.HTTPBackend); i3++ {
+		mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+			CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.HTTPBackend[i3].TargetGroupIds),
+			Extract:       reference.ExternalName(),
+			References:    mg.Spec.InitProvider.HTTPBackend[i3].TargetGroupIdsRefs,
+			Selector:      mg.Spec.InitProvider.HTTPBackend[i3].TargetGroupIdsSelector,
+			To: reference.To{
+				List:    &TargetGroupList{},
+				Managed: &TargetGroup{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.InitProvider.HTTPBackend[i3].TargetGroupIds")
+		}
+		mg.Spec.InitProvider.HTTPBackend[i3].TargetGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+		mg.Spec.InitProvider.HTTPBackend[i3].TargetGroupIdsRefs = mrsp.ResolvedReferences
+
+	}
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.StreamBackend); i3++ {
+		mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+			CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.StreamBackend[i3].TargetGroupIds),
+			Extract:       reference.ExternalName(),
+			References:    mg.Spec.InitProvider.StreamBackend[i3].TargetGroupIdsRefs,
+			Selector:      mg.Spec.InitProvider.StreamBackend[i3].TargetGroupIdsSelector,
+			To: reference.To{
+				List:    &TargetGroupList{},
+				Managed: &TargetGroup{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.InitProvider.StreamBackend[i3].TargetGroupIds")
+		}
+		mg.Spec.InitProvider.StreamBackend[i3].TargetGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+		mg.Spec.InitProvider.StreamBackend[i3].TargetGroupIdsRefs = mrsp.ResolvedReferences
+
+	}
+
+	return nil
+}
+
+// ResolveReferences of this HTTPRouter.
+func (mg *HTTPRouter) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	return nil
+}
+
+// ResolveReferences of this LoadBalancer.
+func (mg *LoadBalancer) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var mrsp reference.MultiResolutionResponse
+	var err error
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.AllocationPolicy); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.ForProvider.AllocationPolicy[i3].Location); i4++ {
+			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetID),
+				Extract:      reference.ExternalName(),
+				Reference:    mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef,
+				Selector:     mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetIDSelector,
+				To: reference.To{
+					List:    &v1alpha11.SubnetList{},
+					Managed: &v1alpha11.Subnet{},
+				},
+			})
+			if err != nil {
+				return errors.Wrap(err, "mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetID")
+			}
+			mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
+			mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef = rsp.ResolvedReference
+
+		}
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Listener); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.ForProvider.Listener[i3].Endpoint); i4++ {
+			for i5 := 0; i5 < len(mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address); i5++ {
+				for i6 := 0; i6 < len(mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address); i6++ {
+					rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+						CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetID),
+						Extract:      reference.ExternalName(),
+						Reference:    mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetIDRef,
+						Selector:     mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetIDSelector,
+						To: reference.To{
+							List:    &v1alpha11.SubnetList{},
+							Managed: &v1alpha11.Subnet{},
+						},
+					})
+					if err != nil {
+						return errors.Wrap(err, "mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetID")
+					}
+					mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
+					mg.Spec.ForProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetIDRef = rsp.ResolvedReference
+
+				}
+			}
+		}
+	}
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Listener); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.ForProvider.Listener[i3].HTTP); i4++ {
+			for i5 := 0; i5 < len(mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler); i5++ {
+				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterID),
+					Extract:      reference.ExternalName(),
+					Reference:    mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterIDRef,
+					Selector:     mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterIDSelector,
+					To: reference.To{
+						List:    &HTTPRouterList{},
+						Managed: &HTTPRouter{},
+					},
+				})
+				if err != nil {
+					return errors.Wrap(err, "mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterID")
+				}
+				mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterID = reference.ToPtrValue(rsp.ResolvedValue)
+				mg.Spec.ForProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterIDRef = rsp.ResolvedReference
+
+			}
+		}
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.NetworkIDRef,
+		Selector:     mg.Spec.ForProvider.NetworkIDSelector,
+		To: reference.To{
+			List:    &v1alpha11.NetworkList{},
+			Managed: &v1alpha11.Network{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID")
+	}
+	mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference
+
+	mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+		CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds),
+		Extract:       reference.ExternalName(),
+		References:    mg.Spec.ForProvider.SecurityGroupIdsRefs,
+		Selector:      mg.Spec.ForProvider.SecurityGroupIdsSelector,
+		To: reference.To{
+			List:    &v1alpha11.SecurityGroupList{},
+			Managed: &v1alpha11.SecurityGroup{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds")
+	}
+	mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+	mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences
+
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.AllocationPolicy); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.InitProvider.AllocationPolicy[i3].Location); i4++ {
+			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetID),
+				Extract:      reference.ExternalName(),
+				Reference:    mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef,
+				Selector:     mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetIDSelector,
+				To: reference.To{
+					List:    &v1alpha11.SubnetList{},
+					Managed: &v1alpha11.Subnet{},
+				},
+			})
+			if err != nil {
+				return errors.Wrap(err, "mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetID")
+			}
+			mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
+			mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef = rsp.ResolvedReference
+
+		}
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.Listener); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.InitProvider.Listener[i3].Endpoint); i4++ {
+			for i5 := 0; i5 < len(mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address); i5++ {
+				for i6 := 0; i6 < len(mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address); i6++ {
+					rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+						CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetID),
+						Extract:      reference.ExternalName(),
+						Reference:    mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetIDRef,
+						Selector:     mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetIDSelector,
+						To: reference.To{
+							List:    &v1alpha11.SubnetList{},
+							Managed: &v1alpha11.Subnet{},
+						},
+					})
+					if err != nil {
+						return errors.Wrap(err, "mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetID")
+					}
+					mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
+					mg.Spec.InitProvider.Listener[i3].Endpoint[i4].Address[i5].InternalIPv4Address[i6].SubnetIDRef = rsp.ResolvedReference
+
+				}
+			}
+		}
+	}
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.Listener); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.InitProvider.Listener[i3].HTTP); i4++ {
+			for i5 := 0; i5 < len(mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler); i5++ {
+				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterID),
+					Extract:      reference.ExternalName(),
+					Reference:    mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterIDRef,
+					Selector:     mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterIDSelector,
+					To: reference.To{
+						List:    &HTTPRouterList{},
+						Managed: &HTTPRouter{},
+					},
+				})
+				if err != nil {
+					return errors.Wrap(err, "mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterID")
+				}
+				mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterID = reference.ToPtrValue(rsp.ResolvedValue)
+				mg.Spec.InitProvider.Listener[i3].HTTP[i4].Handler[i5].HTTPRouterIDRef = rsp.ResolvedReference
+
+			}
+		}
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.NetworkIDRef,
+		Selector:     mg.Spec.InitProvider.NetworkIDSelector,
+		To: reference.To{
+			List:    &v1alpha11.NetworkList{},
+			Managed: &v1alpha11.Network{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID")
+	}
+	mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference
+
+	mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+		CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds),
+		Extract:       reference.ExternalName(),
+		References:    mg.Spec.InitProvider.SecurityGroupIdsRefs,
+		Selector:      mg.Spec.InitProvider.SecurityGroupIdsSelector,
+		To: reference.To{
+			List:    &v1alpha11.SecurityGroupList{},
+			Managed: &v1alpha11.SecurityGroup{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds")
+	}
+	mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues)
+	mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences
+
+	return nil
+}
+
+// ResolveReferences of this TargetGroup.
+func (mg *TargetGroup) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Target); i3++ {
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Target[i3].SubnetID),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.ForProvider.Target[i3].SubnetIDRef,
+			Selector:     mg.Spec.ForProvider.Target[i3].SubnetIDSelector,
+			To: reference.To{
+				List:    &v1alpha11.SubnetList{},
+				Managed: &v1alpha11.Subnet{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.ForProvider.Target[i3].SubnetID")
+		}
+		mg.Spec.ForProvider.Target[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
+		mg.Spec.ForProvider.Target[i3].SubnetIDRef = rsp.ResolvedReference
+
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.Target); i3++ {
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Target[i3].SubnetID),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.InitProvider.Target[i3].SubnetIDRef,
+			Selector:     mg.Spec.InitProvider.Target[i3].SubnetIDSelector,
+			To: reference.To{
+				List:    &v1alpha11.SubnetList{},
+				Managed: &v1alpha11.Subnet{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.InitProvider.Target[i3].SubnetID")
+		}
+		mg.Spec.InitProvider.Target[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
+		mg.Spec.InitProvider.Target[i3].SubnetIDRef = rsp.ResolvedReference
+
+	}
+
+	return nil
+}
+
+// ResolveReferences of this VirtualHost.
+func (mg *VirtualHost) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPRouterID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.HTTPRouterIDRef,
+		Selector:     mg.Spec.ForProvider.HTTPRouterIDSelector,
+		To: reference.To{
+			List:    &HTTPRouterList{},
+			Managed: &HTTPRouter{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.HTTPRouterID")
+	}
+	mg.Spec.ForProvider.HTTPRouterID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.HTTPRouterIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Route); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.ForProvider.Route[i3].GRPCRoute); i4++ {
+			for i5 := 0; i5 < len(mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction); i5++ {
+				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupID),
+					Extract:      reference.ExternalName(),
+					Reference:    mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupIDRef,
+					Selector:     mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupIDSelector,
+					To: reference.To{
+						List:    &BackendGroupList{},
+						Managed: &BackendGroup{},
+					},
+				})
+				if err != nil {
+					return errors.Wrap(err, "mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupID")
+				}
+				mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupID = reference.ToPtrValue(rsp.ResolvedValue)
+				mg.Spec.ForProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupIDRef = rsp.ResolvedReference
+
+			}
+		}
+	}
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Route); i3++ {
+		for i4 := 0; i4 < len(mg.Spec.ForProvider.Route[i3].HTTPRoute); i4++ {
+			for i5 := 0; i5 < len(mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction); i5++ {
+				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupID),
+					Extract:      reference.ExternalName(),
+					Reference:    mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupIDRef,
+					Selector:     mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupIDSelector,
+					To: reference.To{
+						List:    &BackendGroupList{},
+						Managed: &BackendGroup{},
+					},
+				})
+				if err != nil {
+					return errors.Wrap(err, "mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupID")
+				}
+				mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupID = reference.ToPtrValue(rsp.ResolvedValue)
+				mg.Spec.ForProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupIDRef = rsp.ResolvedReference
+
+			}
+		}
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue:
reference.FromPtrValue(mg.Spec.InitProvider.HTTPRouterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.HTTPRouterIDRef, + Selector: mg.Spec.InitProvider.HTTPRouterIDSelector, + To: reference.To{ + List: &HTTPRouterList{}, + Managed: &HTTPRouter{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPRouterID") + } + mg.Spec.InitProvider.HTTPRouterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPRouterIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Route); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Route[i3].GRPCRoute); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupIDRef, + Selector: mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupIDSelector, + To: reference.To{ + List: &BackendGroupList{}, + Managed: &BackendGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupID") + } + mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Route[i3].GRPCRoute[i4].GRPCRouteAction[i5].BackendGroupIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Route); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Route[i3].HTTPRoute); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupIDRef, + Selector: mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupIDSelector, + To: reference.To{ + List: &BackendGroupList{}, + Managed: &BackendGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupID") + } + mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Route[i3].HTTPRoute[i4].HTTPRouteAction[i5].BackendGroupIDRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/alb/v1alpha1/zz_groupversion_info.go b/apis/alb/v1alpha1/zz_groupversion_info.go index 2d943dd..acffde2 100755 --- a/apis/alb/v1alpha1/zz_groupversion_info.go +++ b/apis/alb/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/alb/v1alpha1/zz_httprouter_terraformed.go b/apis/alb/v1alpha1/zz_httprouter_terraformed.go index 2f00d44..8695cdc 100755 --- a/apis/alb/v1alpha1/zz_httprouter_terraformed.go +++ b/apis/alb/v1alpha1/zz_httprouter_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
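The ResolveReferences methods earlier in this diff all follow one contract: when an ID field is empty, the resolver looks up the referenced managed resource, either by an explicit *Ref or by a *Selector, and copies that object's external name into the ID field. A minimal sketch of how a caller might exercise the generated TargetGroup resolver; the TargetParameters element type, the kubeClient reader, and the subnet name are assumptions for illustration, not part of this diff:

	// Sketch only, not generated code.
	tg := &TargetGroup{}
	tg.Spec.ForProvider.Target = []TargetParameters{{
		// Point at the Subnet managed resource by object name instead of
		// hard-coding the cloud subnet ID.
		SubnetIDRef: &v1.Reference{Name: "my-subnet"},
	}}
	if err := tg.ResolveReferences(context.TODO(), kubeClient); err != nil {
		return err // fails if the referenced Subnet does not exist yet
	}
	// tg.Spec.ForProvider.Target[0].SubnetID now holds the Subnet's
	// external name (its cloud ID), and SubnetIDRef records what resolved it.

In the managed-resource reconciler this call happens automatically before the Terraform parameters are built, which is why manifests may specify either the raw ID or a reference, but not need both.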
package v1alpha1

@@ -10,120 +8,118 @@ import (
	"github.com/crossplane/upjet/pkg/resource"
	"github.com/crossplane/upjet/pkg/resource/json"
-
)

// GetTerraformResourceType returns Terraform resource type for this HTTPRouter
func (mg *HTTPRouter) GetTerraformResourceType() string {
-	return "yandex_alb_http_router"
+	return "yandex_alb_http_router"
}

// GetConnectionDetailsMapping for this HTTPRouter
func (tr *HTTPRouter) GetConnectionDetailsMapping() map[string]string {
-	return nil
+	return nil
}

// GetObservation of this HTTPRouter
func (tr *HTTPRouter) GetObservation() (map[string]any, error) {
-	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
}

// SetObservation for this HTTPRouter
func (tr *HTTPRouter) SetObservation(obs map[string]any) error {
-	p, err := json.TFParser.Marshal(obs)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
}

// GetID returns ID of underlying Terraform resource of this HTTPRouter
func (tr *HTTPRouter) GetID() string {
-	if tr.Status.AtProvider.ID == nil {
-		return ""
-	}
-	return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
}

// GetParameters of this HTTPRouter
func (tr *HTTPRouter) GetParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
}

// SetParameters for this HTTPRouter
func (tr *HTTPRouter) SetParameters(params map[string]any) error {
-	p, err := json.TFParser.Marshal(params)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}

// GetInitParameters of this HTTPRouter
func (tr *HTTPRouter) GetInitParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
}

// GetMergedParameters of this HTTPRouter
func (tr *HTTPRouter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from the initProvider to forProvider.
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this HTTPRouter using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *HTTPRouter) LateInitialize(attrs []byte) (bool, error) { - params := &HTTPRouterParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &HTTPRouterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *HTTPRouter) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/alb/v1alpha1/zz_httprouter_types.go b/apis/alb/v1alpha1/zz_httprouter_types.go index fe71e63..2306ce2 100755 --- a/apis/alb/v1alpha1/zz_httprouter_types.go +++ b/apis/alb/v1alpha1/zz_httprouter_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,291 +7,236 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AndPrincipalsHeaderInitParameters struct { + // Name of the HTTP Router. Provided by the client when the HTTP Router is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the HTTP Router. Provided by the client when the HTTP Router is created. 
-Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type AndPrincipalsHeaderObservation struct { + // Name of the HTTP Router. Provided by the client when the HTTP Router is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the HTTP Router. Provided by the client when the HTTP Router is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []ValueObservation `json:"value,omitempty" tf:"value,omitempty"` + Value []ValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type AndPrincipalsHeaderParameters struct { + // Name of the HTTP Router. Provided by the client when the HTTP Router is created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Name of the HTTP Router. Provided by the client when the HTTP Router is created. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` - -// +kubebuilder:validation:Optional -Value []ValueParameters `json:"value,omitempty" tf:"value,omitempty"` + // +kubebuilder:validation:Optional + Value []ValueParameters `json:"value,omitempty" tf:"value,omitempty"` } - type AndPrincipalsInitParameters struct { + Any *bool `json:"any,omitempty" tf:"any,omitempty"` + Header []AndPrincipalsHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` -Any *bool `json:"any,omitempty" tf:"any,omitempty"` - -Header []AndPrincipalsHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` - -RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"` + RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"` } - type AndPrincipalsObservation struct { + Any *bool `json:"any,omitempty" tf:"any,omitempty"` + Header []AndPrincipalsHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` -Any *bool `json:"any,omitempty" tf:"any,omitempty"` - -Header []AndPrincipalsHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` - -RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"` + RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"` } - type AndPrincipalsParameters struct { + // +kubebuilder:validation:Optional + Any *bool `json:"any,omitempty" tf:"any,omitempty"` -// +kubebuilder:validation:Optional -Any *bool `json:"any,omitempty" tf:"any,omitempty"` - -// +kubebuilder:validation:Optional -Header []AndPrincipalsHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + // +kubebuilder:validation:Optional + Header []AndPrincipalsHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` -// +kubebuilder:validation:Optional -RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"` + // +kubebuilder:validation:Optional + RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"` } - type HTTPRouterInitParameters struct { + // An optional description of the HTTP Router. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the HTTP Router. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this HTTP Router. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this HTTP Router. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the HTTP Router. Provided by the client when the HTTP Router is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the HTTP Router. Provided by the client when the HTTP Router is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -RouteOptions []RouteOptionsInitParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"` + RouteOptions []RouteOptionsInitParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"` } - type HTTPRouterObservation struct { + // The HTTP Router creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// The HTTP Router creation timestamp. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // An optional description of the HTTP Router. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the HTTP Router. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the HTTP Router. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// The ID of the HTTP Router. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Labels to assign to this HTTP Router. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Labels to assign to this HTTP Router. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Name of the HTTP Router. 
Provided by the client when the HTTP Router is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the HTTP Router. Provided by the client when the HTTP Router is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -RouteOptions []RouteOptionsObservation `json:"routeOptions,omitempty" tf:"route_options,omitempty"` + RouteOptions []RouteOptionsObservation `json:"routeOptions,omitempty" tf:"route_options,omitempty"` } - type HTTPRouterParameters struct { + // An optional description of the HTTP Router. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the HTTP Router. Provide this property when you create the resource. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Labels to assign to this HTTP Router. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Labels to assign to this HTTP Router. A list of key/value pairs. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Name of the HTTP Router. Provided by the client when the HTTP Router is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the HTTP Router. Provided by the client when the HTTP Router is created. 
-// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// +kubebuilder:validation:Optional -RouteOptions []RouteOptionsParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"` + // +kubebuilder:validation:Optional + RouteOptions []RouteOptionsParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"` } - type PrincipalsInitParameters struct { - - -AndPrincipals []AndPrincipalsInitParameters `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"` + AndPrincipals []AndPrincipalsInitParameters `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"` } - type PrincipalsObservation struct { - - -AndPrincipals []AndPrincipalsObservation `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"` + AndPrincipals []AndPrincipalsObservation `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"` } - type PrincipalsParameters struct { - -// +kubebuilder:validation:Optional -AndPrincipals []AndPrincipalsParameters `json:"andPrincipals" tf:"and_principals,omitempty"` + // +kubebuilder:validation:Optional + AndPrincipals []AndPrincipalsParameters `json:"andPrincipals" tf:"and_principals,omitempty"` } - type RbacInitParameters struct { + Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Principals []PrincipalsInitParameters `json:"principals,omitempty" tf:"principals,omitempty"` + Principals []PrincipalsInitParameters `json:"principals,omitempty" tf:"principals,omitempty"` } - type RbacObservation struct { + Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Principals []PrincipalsObservation `json:"principals,omitempty" tf:"principals,omitempty"` + Principals []PrincipalsObservation `json:"principals,omitempty" tf:"principals,omitempty"` } - type RbacParameters struct { + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` -// +kubebuilder:validation:Optional -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -// +kubebuilder:validation:Optional -Principals []PrincipalsParameters `json:"principals" tf:"principals,omitempty"` + // +kubebuilder:validation:Optional + Principals []PrincipalsParameters `json:"principals" tf:"principals,omitempty"` } - type RouteOptionsInitParameters struct { + Rbac []RbacInitParameters `json:"rbac,omitempty" tf:"rbac,omitempty"` - -Rbac []RbacInitParameters `json:"rbac,omitempty" tf:"rbac,omitempty"` - -// The ID of the HTTP Router. -SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` + // The ID of the HTTP Router. + SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` } - type RouteOptionsObservation struct { + Rbac []RbacObservation `json:"rbac,omitempty" tf:"rbac,omitempty"` - -Rbac []RbacObservation `json:"rbac,omitempty" tf:"rbac,omitempty"` - -// The ID of the HTTP Router. -SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` + // The ID of the HTTP Router. 
+ SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` } - type RouteOptionsParameters struct { + // +kubebuilder:validation:Optional + Rbac []RbacParameters `json:"rbac,omitempty" tf:"rbac,omitempty"` -// +kubebuilder:validation:Optional -Rbac []RbacParameters `json:"rbac,omitempty" tf:"rbac,omitempty"` - -// The ID of the HTTP Router. -// +kubebuilder:validation:Optional -SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` + // The ID of the HTTP Router. + // +kubebuilder:validation:Optional + SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` } - type ValueInitParameters struct { + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type ValueObservation struct { + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type ValueParameters struct { + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// +kubebuilder:validation:Optional -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// +kubebuilder:validation:Optional -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } // HTTPRouterSpec defines the desired state of HTTPRouter type HTTPRouterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider HTTPRouterParameters `json:"forProvider"` + ForProvider HTTPRouterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -304,20 +247,19 @@ type HTTPRouterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider HTTPRouterInitParameters `json:"initProvider,omitempty"` + InitProvider HTTPRouterInitParameters `json:"initProvider,omitempty"` } // HTTPRouterStatus defines the observed state of HTTPRouter. type HTTPRouterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider HTTPRouterObservation `json:"atProvider,omitempty"` + AtProvider HTTPRouterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // HTTPRouter is the Schema for the HTTPRouters API. The HTTP router defines the routing rules for HTTP requests to backend groups. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/alb/v1alpha1/zz_loadbalancer_terraformed.go b/apis/alb/v1alpha1/zz_loadbalancer_terraformed.go index 30647ba..2f122f1 100755 --- a/apis/alb/v1alpha1/zz_loadbalancer_terraformed.go +++ b/apis/alb/v1alpha1/zz_loadbalancer_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this LoadBalancer func (mg *LoadBalancer) GetTerraformResourceType() string { - return "yandex_alb_load_balancer" + return "yandex_alb_load_balancer" } // GetConnectionDetailsMapping for this LoadBalancer func (tr *LoadBalancer) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this LoadBalancer func (tr *LoadBalancer) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this LoadBalancer func (tr *LoadBalancer) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this LoadBalancer func (tr *LoadBalancer) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this LoadBalancer func (tr *LoadBalancer) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this LoadBalancer func (tr *LoadBalancer) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this LoadBalancer func (tr *LoadBalancer) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this LoadBalancer func (tr *LoadBalancer) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this LoadBalancer using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *LoadBalancer) LateInitialize(attrs []byte) (bool, error) { - params := &LoadBalancerParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &LoadBalancerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *LoadBalancer) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/alb/v1alpha1/zz_loadbalancer_types.go b/apis/alb/v1alpha1/zz_loadbalancer_types.go index a8dfe26..4b5ae4c 100755 --- a/apis/alb/v1alpha1/zz_loadbalancer_types.go +++ b/apis/alb/v1alpha1/zz_loadbalancer_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
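The Note(lsviben) comment in GetMergedParameters above is easiest to see on a toy value. A minimal sketch of the same mergo call applied to plain maps, assuming the mergo module that upjet already imports; the keys are illustrative:

	params := map[string]any{"name": "prod-balancer"}
	initParams := map[string]any{"name": "ignored", "description": "set at creation"}
	// WithSliceDeepCopy flips Overwrite to true internally, so the option
	// function resets it: forProvider values must win, and initProvider
	// may only fill in keys that forProvider left empty.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err) // sketch only; the generated code wraps and returns the error
	}
	// params == map[string]any{"name": "prod-balancer", "description": "set at creation"}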
package v1alpha1 @@ -9,1232 +7,1065 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AddressInitParameters struct { + // External IPv4 address. The structure is documented below. + ExternalIPv4Address []ExternalIPv4AddressInitParameters `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` -// External IPv4 address. The structure is documented below. -ExternalIPv4Address []ExternalIPv4AddressInitParameters `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` + // External IPv6 address. The structure is documented below. + ExternalIPv6Address []ExternalIPv6AddressInitParameters `json:"externalIpv6Address,omitempty" tf:"external_ipv6_address,omitempty"` -// External IPv6 address. The structure is documented below. -ExternalIPv6Address []ExternalIPv6AddressInitParameters `json:"externalIpv6Address,omitempty" tf:"external_ipv6_address,omitempty"` - -// Internal IPv4 address. The structure is documented below. -InternalIPv4Address []InternalIPv4AddressInitParameters `json:"internalIpv4Address,omitempty" tf:"internal_ipv4_address,omitempty"` + // Internal IPv4 address. The structure is documented below. + InternalIPv4Address []InternalIPv4AddressInitParameters `json:"internalIpv4Address,omitempty" tf:"internal_ipv4_address,omitempty"` } - type AddressObservation struct { + // External IPv4 address. The structure is documented below. + ExternalIPv4Address []ExternalIPv4AddressObservation `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` -// External IPv4 address. The structure is documented below. -ExternalIPv4Address []ExternalIPv4AddressObservation `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` - -// External IPv6 address. The structure is documented below. -ExternalIPv6Address []ExternalIPv6AddressObservation `json:"externalIpv6Address,omitempty" tf:"external_ipv6_address,omitempty"` + // External IPv6 address. The structure is documented below. + ExternalIPv6Address []ExternalIPv6AddressObservation `json:"externalIpv6Address,omitempty" tf:"external_ipv6_address,omitempty"` -// Internal IPv4 address. The structure is documented below. -InternalIPv4Address []InternalIPv4AddressObservation `json:"internalIpv4Address,omitempty" tf:"internal_ipv4_address,omitempty"` + // Internal IPv4 address. The structure is documented below. + InternalIPv4Address []InternalIPv4AddressObservation `json:"internalIpv4Address,omitempty" tf:"internal_ipv4_address,omitempty"` } - type AddressParameters struct { + // External IPv4 address. The structure is documented below. + // +kubebuilder:validation:Optional + ExternalIPv4Address []ExternalIPv4AddressParameters `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` -// External IPv4 address. The structure is documented below. -// +kubebuilder:validation:Optional -ExternalIPv4Address []ExternalIPv4AddressParameters `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` - -// External IPv6 address. The structure is documented below. -// +kubebuilder:validation:Optional -ExternalIPv6Address []ExternalIPv6AddressParameters `json:"externalIpv6Address,omitempty" tf:"external_ipv6_address,omitempty"` + // External IPv6 address. The structure is documented below. + // +kubebuilder:validation:Optional + ExternalIPv6Address []ExternalIPv6AddressParameters `json:"externalIpv6Address,omitempty" tf:"external_ipv6_address,omitempty"` -// Internal IPv4 address. 
The structure is documented below. -// +kubebuilder:validation:Optional -InternalIPv4Address []InternalIPv4AddressParameters `json:"internalIpv4Address,omitempty" tf:"internal_ipv4_address,omitempty"` + // Internal IPv4 address. The structure is documented below. + // +kubebuilder:validation:Optional + InternalIPv4Address []InternalIPv4AddressParameters `json:"internalIpv4Address,omitempty" tf:"internal_ipv4_address,omitempty"` } - type AllocationPolicyInitParameters struct { - -// Unique set of locations. The structure is documented below. -Location []LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"` + // Unique set of locations. The structure is documented below. + Location []LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"` } - type AllocationPolicyObservation struct { - -// Unique set of locations. The structure is documented below. -Location []LocationObservation `json:"location,omitempty" tf:"location,omitempty"` + // Unique set of locations. The structure is documented below. + Location []LocationObservation `json:"location,omitempty" tf:"location,omitempty"` } - type AllocationPolicyParameters struct { - -// Unique set of locations. The structure is documented below. -// +kubebuilder:validation:Optional -Location []LocationParameters `json:"location" tf:"location,omitempty"` + // Unique set of locations. The structure is documented below. + // +kubebuilder:validation:Optional + Location []LocationParameters `json:"location" tf:"location,omitempty"` } - type DefaultHandlerInitParameters struct { + // Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. + // +listType=set + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` -// Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. -// +listType=set -CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` - -// HTTP handler resource. The structure is documented below. -HTTPHandler []HTTPHandlerInitParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` + // HTTP handler resource. The structure is documented below. + HTTPHandler []HTTPHandlerInitParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` -// Stream handler resource. The structure is documented below. -StreamHandler []DefaultHandlerStreamHandlerInitParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` + // Stream handler resource. The structure is documented below. + StreamHandler []DefaultHandlerStreamHandlerInitParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` } - type DefaultHandlerObservation struct { + // Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. + // +listType=set + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` -// Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. 
-// +listType=set -CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` + // HTTP handler resource. The structure is documented below. + HTTPHandler []HTTPHandlerObservation `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` -// HTTP handler resource. The structure is documented below. -HTTPHandler []HTTPHandlerObservation `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` - -// Stream handler resource. The structure is documented below. -StreamHandler []DefaultHandlerStreamHandlerObservation `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` + // Stream handler resource. The structure is documented below. + StreamHandler []DefaultHandlerStreamHandlerObservation `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` } - type DefaultHandlerParameters struct { + // Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. + // +kubebuilder:validation:Optional + // +listType=set + CertificateIds []*string `json:"certificateIds" tf:"certificate_ids,omitempty"` -// Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. -// +kubebuilder:validation:Optional -// +listType=set -CertificateIds []*string `json:"certificateIds" tf:"certificate_ids,omitempty"` + // HTTP handler resource. The structure is documented below. + // +kubebuilder:validation:Optional + HTTPHandler []HTTPHandlerParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` -// HTTP handler resource. The structure is documented below. -// +kubebuilder:validation:Optional -HTTPHandler []HTTPHandlerParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` - -// Stream handler resource. The structure is documented below. -// +kubebuilder:validation:Optional -StreamHandler []DefaultHandlerStreamHandlerParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` + // Stream handler resource. The structure is documented below. + // +kubebuilder:validation:Optional + StreamHandler []DefaultHandlerStreamHandlerParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` } - type DefaultHandlerStreamHandlerInitParameters struct { - -// Backend group id. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type DefaultHandlerStreamHandlerObservation struct { - -// Backend group id. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type DefaultHandlerStreamHandlerParameters struct { - -// Backend group id. -// +kubebuilder:validation:Optional -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. 
+ // +kubebuilder:validation:Optional + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type DiscardRuleInitParameters struct { + DiscardPercent *float64 `json:"discardPercent,omitempty" tf:"discard_percent,omitempty"` + // list of grpc codes by name, e.g, ["NOT_FOUND", "RESOURCE_EXHAUSTED"] + GRPCCodes []*string `json:"grpcCodes,omitempty" tf:"grpc_codes,omitempty"` -DiscardPercent *float64 `json:"discardPercent,omitempty" tf:"discard_percent,omitempty"` - -// list of grpc codes by name, e.g, ["NOT_FOUND", "RESOURCE_EXHAUSTED"] -GRPCCodes []*string `json:"grpcCodes,omitempty" tf:"grpc_codes,omitempty"` + // 5XX or ALL + HTTPCodeIntervals []*string `json:"httpCodeIntervals,omitempty" tf:"http_code_intervals,omitempty"` -// 5XX or ALL -HTTPCodeIntervals []*string `json:"httpCodeIntervals,omitempty" tf:"http_code_intervals,omitempty"` - -// 599 -HTTPCodes []*float64 `json:"httpCodes,omitempty" tf:"http_codes,omitempty"` + // 599 + HTTPCodes []*float64 `json:"httpCodes,omitempty" tf:"http_codes,omitempty"` } - type DiscardRuleObservation struct { + DiscardPercent *float64 `json:"discardPercent,omitempty" tf:"discard_percent,omitempty"` + // list of grpc codes by name, e.g, ["NOT_FOUND", "RESOURCE_EXHAUSTED"] + GRPCCodes []*string `json:"grpcCodes,omitempty" tf:"grpc_codes,omitempty"` -DiscardPercent *float64 `json:"discardPercent,omitempty" tf:"discard_percent,omitempty"` + // 5XX or ALL + HTTPCodeIntervals []*string `json:"httpCodeIntervals,omitempty" tf:"http_code_intervals,omitempty"` -// list of grpc codes by name, e.g, ["NOT_FOUND", "RESOURCE_EXHAUSTED"] -GRPCCodes []*string `json:"grpcCodes,omitempty" tf:"grpc_codes,omitempty"` - -// 5XX or ALL -HTTPCodeIntervals []*string `json:"httpCodeIntervals,omitempty" tf:"http_code_intervals,omitempty"` - -// 599 -HTTPCodes []*float64 `json:"httpCodes,omitempty" tf:"http_codes,omitempty"` + // 599 + HTTPCodes []*float64 `json:"httpCodes,omitempty" tf:"http_codes,omitempty"` } - type DiscardRuleParameters struct { + // +kubebuilder:validation:Optional + DiscardPercent *float64 `json:"discardPercent,omitempty" tf:"discard_percent,omitempty"` -// +kubebuilder:validation:Optional -DiscardPercent *float64 `json:"discardPercent,omitempty" tf:"discard_percent,omitempty"` + // list of grpc codes by name, e.g, ["NOT_FOUND", "RESOURCE_EXHAUSTED"] + // +kubebuilder:validation:Optional + GRPCCodes []*string `json:"grpcCodes,omitempty" tf:"grpc_codes,omitempty"` -// list of grpc codes by name, e.g, ["NOT_FOUND", "RESOURCE_EXHAUSTED"] -// +kubebuilder:validation:Optional -GRPCCodes []*string `json:"grpcCodes,omitempty" tf:"grpc_codes,omitempty"` + // 5XX or ALL + // +kubebuilder:validation:Optional + HTTPCodeIntervals []*string `json:"httpCodeIntervals,omitempty" tf:"http_code_intervals,omitempty"` -// 5XX or ALL -// +kubebuilder:validation:Optional -HTTPCodeIntervals []*string `json:"httpCodeIntervals,omitempty" tf:"http_code_intervals,omitempty"` - -// 599 -// +kubebuilder:validation:Optional -HTTPCodes []*float64 `json:"httpCodes,omitempty" tf:"http_codes,omitempty"` + // 599 + // +kubebuilder:validation:Optional + HTTPCodes []*float64 `json:"httpCodes,omitempty" tf:"http_codes,omitempty"` } - type EndpointInitParameters struct { + // Provided by the client or computed automatically. + Address []AddressInitParameters `json:"address,omitempty" tf:"address,omitempty"` -// Provided by the client or computed automatically. 
-Address []AddressInitParameters `json:"address,omitempty" tf:"address,omitempty"` - -// One or more ports to listen on. -Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + // One or more ports to listen on. + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` } - type EndpointObservation struct { + // Provided by the client or computed automatically. + Address []AddressObservation `json:"address,omitempty" tf:"address,omitempty"` -// Provided by the client or computed automatically. -Address []AddressObservation `json:"address,omitempty" tf:"address,omitempty"` - -// One or more ports to listen on. -Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + // One or more ports to listen on. + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` } - type EndpointParameters struct { + // Provided by the client or computed automatically. + // +kubebuilder:validation:Optional + Address []AddressParameters `json:"address" tf:"address,omitempty"` -// Provided by the client or computed automatically. -// +kubebuilder:validation:Optional -Address []AddressParameters `json:"address" tf:"address,omitempty"` - -// One or more ports to listen on. -// +kubebuilder:validation:Optional -Ports []*float64 `json:"ports" tf:"ports,omitempty"` + // One or more ports to listen on. + // +kubebuilder:validation:Optional + Ports []*float64 `json:"ports" tf:"ports,omitempty"` } - type ExternalIPv4AddressInitParameters struct { - -// Provided by the client or computed automatically. -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // Provided by the client or computed automatically. + Address *string `json:"address,omitempty" tf:"address,omitempty"` } - type ExternalIPv4AddressObservation struct { - -// Provided by the client or computed automatically. -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // Provided by the client or computed automatically. + Address *string `json:"address,omitempty" tf:"address,omitempty"` } - type ExternalIPv4AddressParameters struct { - -// Provided by the client or computed automatically. -// +kubebuilder:validation:Optional -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // Provided by the client or computed automatically. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` } - type ExternalIPv6AddressInitParameters struct { - -// Provided by the client or computed automatically. -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // Provided by the client or computed automatically. + Address *string `json:"address,omitempty" tf:"address,omitempty"` } - type ExternalIPv6AddressObservation struct { - -// Provided by the client or computed automatically. -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // Provided by the client or computed automatically. + Address *string `json:"address,omitempty" tf:"address,omitempty"` } - type ExternalIPv6AddressParameters struct { - -// Provided by the client or computed automatically. -// +kubebuilder:validation:Optional -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // Provided by the client or computed automatically. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` } - type HTTPHandlerHttp2OptionsInitParameters struct { - -// Maximum number of concurrent streams. 
-MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type HTTPHandlerHttp2OptionsObservation struct { - -// Maximum number of concurrent streams. -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type HTTPHandlerHttp2OptionsParameters struct { - -// Maximum number of concurrent streams. -// +kubebuilder:validation:Optional -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + // +kubebuilder:validation:Optional + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type HTTPHandlerInitParameters struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` + // HTTP router id. + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// HTTP router id. -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + Http2Options []HTTPHandlerHttp2OptionsInitParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -Http2Options []HTTPHandlerHttp2OptionsInitParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` - -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HTTPHandlerObservation struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` + // HTTP router id. + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// HTTP router id. -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + Http2Options []HTTPHandlerHttp2OptionsObservation `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -Http2Options []HTTPHandlerHttp2OptionsObservation `json:"http2Options,omitempty" tf:"http2_options,omitempty"` - -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. 
-RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HTTPHandlerParameters struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + // +kubebuilder:validation:Optional + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -// +kubebuilder:validation:Optional -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` - -// HTTP router id. -// +kubebuilder:validation:Optional -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // HTTP router id. + // +kubebuilder:validation:Optional + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -// +kubebuilder:validation:Optional -Http2Options []HTTPHandlerHttp2OptionsParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + // +kubebuilder:validation:Optional + Http2Options []HTTPHandlerHttp2OptionsParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -// +kubebuilder:validation:Optional -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + // +kubebuilder:validation:Optional + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HTTPInitParameters struct { + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + Handler []HandlerInitParameters `json:"handler,omitempty" tf:"handler,omitempty"` -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -Handler []HandlerInitParameters `json:"handler,omitempty" tf:"handler,omitempty"` - -// Shortcut for adding http -> https redirects. The structure is documented below. -Redirects []RedirectsInitParameters `json:"redirects,omitempty" tf:"redirects,omitempty"` + // Shortcut for adding http -> https redirects. The structure is documented below. + Redirects []RedirectsInitParameters `json:"redirects,omitempty" tf:"redirects,omitempty"` } - type HTTPObservation struct { + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + Handler []HandlerObservation `json:"handler,omitempty" tf:"handler,omitempty"` -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -Handler []HandlerObservation `json:"handler,omitempty" tf:"handler,omitempty"` - -// Shortcut for adding http -> https redirects. The structure is documented below. -Redirects []RedirectsObservation `json:"redirects,omitempty" tf:"redirects,omitempty"` + // Shortcut for adding http -> https redirects. The structure is documented below. + Redirects []RedirectsObservation `json:"redirects,omitempty" tf:"redirects,omitempty"` } - type HTTPParameters struct { + // Stream handler that sets plaintext Stream backend group. 
The structure is documented below. + // +kubebuilder:validation:Optional + Handler []HandlerParameters `json:"handler,omitempty" tf:"handler,omitempty"` -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -// +kubebuilder:validation:Optional -Handler []HandlerParameters `json:"handler,omitempty" tf:"handler,omitempty"` - -// Shortcut for adding http -> https redirects. The structure is documented below. -// +kubebuilder:validation:Optional -Redirects []RedirectsParameters `json:"redirects,omitempty" tf:"redirects,omitempty"` + // Shortcut for adding http -> https redirects. The structure is documented below. + // +kubebuilder:validation:Optional + Redirects []RedirectsParameters `json:"redirects,omitempty" tf:"redirects,omitempty"` } - type HandlerHTTPHandlerHttp2OptionsInitParameters struct { - -// Maximum number of concurrent streams. -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type HandlerHTTPHandlerHttp2OptionsObservation struct { - -// Maximum number of concurrent streams. -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type HandlerHTTPHandlerHttp2OptionsParameters struct { - -// Maximum number of concurrent streams. -// +kubebuilder:validation:Optional -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + // +kubebuilder:validation:Optional + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type HandlerHTTPHandlerInitParameters struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` + // HTTP router id. + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// HTTP router id. -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + Http2Options []HandlerHTTPHandlerHttp2OptionsInitParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -Http2Options []HandlerHTTPHandlerHttp2OptionsInitParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` - -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HandlerHTTPHandlerObservation struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. 
+ AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` - -// HTTP router id. -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // HTTP router id. + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -Http2Options []HandlerHTTPHandlerHttp2OptionsObservation `json:"http2Options,omitempty" tf:"http2_options,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + Http2Options []HandlerHTTPHandlerHttp2OptionsObservation `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HandlerHTTPHandlerParameters struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + // +kubebuilder:validation:Optional + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -// +kubebuilder:validation:Optional -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` - -// HTTP router id. -// +kubebuilder:validation:Optional -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // HTTP router id. + // +kubebuilder:validation:Optional + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -// +kubebuilder:validation:Optional -Http2Options []HandlerHTTPHandlerHttp2OptionsParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + // +kubebuilder:validation:Optional + Http2Options []HandlerHTTPHandlerHttp2OptionsParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -// +kubebuilder:validation:Optional -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + // +kubebuilder:validation:Optional + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HandlerInitParameters struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` - -// HTTP router id. -// +crossplane:generate:reference:type=HTTPRouter -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // HTTP router id. 
+ // +crossplane:generate:reference:type=HTTPRouter + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// Reference to a HTTPRouter to populate httpRouterId. -// +kubebuilder:validation:Optional -HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"` + // Reference to a HTTPRouter to populate httpRouterId. + // +kubebuilder:validation:Optional + HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"` -// Selector for a HTTPRouter to populate httpRouterId. -// +kubebuilder:validation:Optional -HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"` + // Selector for a HTTPRouter to populate httpRouterId. + // +kubebuilder:validation:Optional + HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -Http2Options []Http2OptionsInitParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + Http2Options []Http2OptionsInitParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HandlerObservation struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` + // HTTP router id. + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// HTTP router id. -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + Http2Options []Http2OptionsObservation `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -Http2Options []Http2OptionsObservation `json:"http2Options,omitempty" tf:"http2_options,omitempty"` - -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HandlerParameters struct { + // If set, will enable only HTTP1 protocol with HTTP1.0 support. + // +kubebuilder:validation:Optional + AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` -// If set, will enable only HTTP1 protocol with HTTP1.0 support. -// +kubebuilder:validation:Optional -AllowHttp10 *bool `json:"allowHttp10,omitempty" tf:"allow_http10,omitempty"` - -// HTTP router id. 
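// ---------------------------------------------------------------------------
// Editorial sketch (not upjet-generated code): per the marker comments above,
// the handler can point at an HTTPRouter either by ID or, as here, by a
// Crossplane reference; the router name and stream-count literals are
// assumptions.
func exampleHandler() HandlerInitParameters {
	maxStreams := 100.0
	return HandlerInitParameters{
		HTTPRouterIDRef: &v1.Reference{Name: "example-http-router"},
		Http2Options:    []Http2OptionsInitParameters{{MaxConcurrentStreams: &maxStreams}},
	}
}
// ---------------------------------------------------------------------------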
-// +crossplane:generate:reference:type=HTTPRouter -// +kubebuilder:validation:Optional -HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` + // HTTP router id. + // +crossplane:generate:reference:type=HTTPRouter + // +kubebuilder:validation:Optional + HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"` -// Reference to a HTTPRouter to populate httpRouterId. -// +kubebuilder:validation:Optional -HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"` + // Reference to a HTTPRouter to populate httpRouterId. + // +kubebuilder:validation:Optional + HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"` -// Selector for a HTTPRouter to populate httpRouterId. -// +kubebuilder:validation:Optional -HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"` + // Selector for a HTTPRouter to populate httpRouterId. + // +kubebuilder:validation:Optional + HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"` -// If set, will enable HTTP2 protocol for the handler. The structure is documented below. -// +kubebuilder:validation:Optional -Http2Options []Http2OptionsParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` + // If set, will enable HTTP2 protocol for the handler. The structure is documented below. + // +kubebuilder:validation:Optional + Http2Options []Http2OptionsParameters `json:"http2Options,omitempty" tf:"http2_options,omitempty"` -// When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. -// +kubebuilder:validation:Optional -RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` + // When unset, will preserve the incoming x-request-id header, otherwise would rewrite it with a new value. + // +kubebuilder:validation:Optional + RewriteRequestID *bool `json:"rewriteRequestId,omitempty" tf:"rewrite_request_id,omitempty"` } - type HandlerStreamHandlerInitParameters struct { - -// Backend group id. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type HandlerStreamHandlerObservation struct { - -// Backend group id. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type HandlerStreamHandlerParameters struct { - -// Backend group id. -// +kubebuilder:validation:Optional -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + // +kubebuilder:validation:Optional + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type Http2OptionsInitParameters struct { - -// Maximum number of concurrent streams. -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type Http2OptionsObservation struct { - -// Maximum number of concurrent streams. -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. 
+ MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type Http2OptionsParameters struct { - -// Maximum number of concurrent streams. -// +kubebuilder:validation:Optional -MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` + // Maximum number of concurrent streams. + // +kubebuilder:validation:Optional + MaxConcurrentStreams *float64 `json:"maxConcurrentStreams,omitempty" tf:"max_concurrent_streams,omitempty"` } - type InternalIPv4AddressInitParameters struct { + // Provided by the client or computed automatically. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Provided by the client or computed automatically. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// Provided by the client or computed automatically. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Provided by the client or computed automatically. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type InternalIPv4AddressObservation struct { + // Provided by the client or computed automatically. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Provided by the client or computed automatically. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// Provided by the client or computed automatically. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Provided by the client or computed automatically. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type InternalIPv4AddressParameters struct { + // Provided by the client or computed automatically. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Provided by the client or computed automatically. -// +kubebuilder:validation:Optional -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// Provided by the client or computed automatically. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Provided by the client or computed automatically. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type ListenerInitParameters struct { + // Network endpoints (addresses and ports) of the listener. The structure is documented below. + Endpoint []EndpointInitParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` -// Network endpoints (addresses and ports) of the listener. The structure is documented below. -Endpoint []EndpointInitParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + // HTTP listener resource. The structure is documented below. + HTTP []HTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` -// HTTP listener resource. The structure is documented below. -HTTP []HTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` + // name of the listener. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// name of the listener. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Stream listener resource. The structure is documented below. + Stream []StreamInitParameters `json:"stream,omitempty" tf:"stream,omitempty"` -// Stream listener resource. The structure is documented below. -Stream []StreamInitParameters `json:"stream,omitempty" tf:"stream,omitempty"` - -// TLS listener resource. The structure is documented below. -TLS []ListenerTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // TLS listener resource. The structure is documented below. + TLS []ListenerTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` } - type ListenerObservation struct { + // Network endpoints (addresses and ports) of the listener. The structure is documented below. + Endpoint []EndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` -// Network endpoints (addresses and ports) of the listener. The structure is documented below. -Endpoint []EndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` - -// HTTP listener resource. The structure is documented below. -HTTP []HTTPObservation `json:"http,omitempty" tf:"http,omitempty"` + // HTTP listener resource. The structure is documented below. + HTTP []HTTPObservation `json:"http,omitempty" tf:"http,omitempty"` -// name of the listener. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // name of the listener. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Stream listener resource. The structure is documented below. -Stream []StreamObservation `json:"stream,omitempty" tf:"stream,omitempty"` + // Stream listener resource. The structure is documented below. + Stream []StreamObservation `json:"stream,omitempty" tf:"stream,omitempty"` -// TLS listener resource. The structure is documented below. -TLS []ListenerTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + // TLS listener resource. The structure is documented below. + TLS []ListenerTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` } - type ListenerParameters struct { + // Network endpoints (addresses and ports) of the listener. The structure is documented below. 
+ // +kubebuilder:validation:Optional + Endpoint []EndpointParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` -// Network endpoints (addresses and ports) of the listener. The structure is documented below. -// +kubebuilder:validation:Optional -Endpoint []EndpointParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` - -// HTTP listener resource. The structure is documented below. -// +kubebuilder:validation:Optional -HTTP []HTTPParameters `json:"http,omitempty" tf:"http,omitempty"` + // HTTP listener resource. The structure is documented below. + // +kubebuilder:validation:Optional + HTTP []HTTPParameters `json:"http,omitempty" tf:"http,omitempty"` -// name of the listener. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // name of the listener. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Stream listener resource. The structure is documented below. -// +kubebuilder:validation:Optional -Stream []StreamParameters `json:"stream,omitempty" tf:"stream,omitempty"` + // Stream listener resource. The structure is documented below. + // +kubebuilder:validation:Optional + Stream []StreamParameters `json:"stream,omitempty" tf:"stream,omitempty"` -// TLS listener resource. The structure is documented below. -// +kubebuilder:validation:Optional -TLS []ListenerTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + // TLS listener resource. The structure is documented below. + // +kubebuilder:validation:Optional + TLS []ListenerTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` } - type ListenerTLSInitParameters struct { + // TLS handler resource. The structure is documented below. + DefaultHandler []DefaultHandlerInitParameters `json:"defaultHandler,omitempty" tf:"default_handler,omitempty"` -// TLS handler resource. The structure is documented below. -DefaultHandler []DefaultHandlerInitParameters `json:"defaultHandler,omitempty" tf:"default_handler,omitempty"` - -// SNI match resource. The structure is documented below. -SniHandler []SniHandlerInitParameters `json:"sniHandler,omitempty" tf:"sni_handler,omitempty"` + // SNI match resource. The structure is documented below. + SniHandler []SniHandlerInitParameters `json:"sniHandler,omitempty" tf:"sni_handler,omitempty"` } - type ListenerTLSObservation struct { + // TLS handler resource. The structure is documented below. + DefaultHandler []DefaultHandlerObservation `json:"defaultHandler,omitempty" tf:"default_handler,omitempty"` -// TLS handler resource. The structure is documented below. -DefaultHandler []DefaultHandlerObservation `json:"defaultHandler,omitempty" tf:"default_handler,omitempty"` - -// SNI match resource. The structure is documented below. -SniHandler []SniHandlerObservation `json:"sniHandler,omitempty" tf:"sni_handler,omitempty"` + // SNI match resource. The structure is documented below. + SniHandler []SniHandlerObservation `json:"sniHandler,omitempty" tf:"sni_handler,omitempty"` } - type ListenerTLSParameters struct { + // TLS handler resource. The structure is documented below. + // +kubebuilder:validation:Optional + DefaultHandler []DefaultHandlerParameters `json:"defaultHandler" tf:"default_handler,omitempty"` -// TLS handler resource. The structure is documented below. -// +kubebuilder:validation:Optional -DefaultHandler []DefaultHandlerParameters `json:"defaultHandler" tf:"default_handler,omitempty"` - -// SNI match resource. The structure is documented below. 
-// +kubebuilder:validation:Optional -SniHandler []SniHandlerParameters `json:"sniHandler,omitempty" tf:"sni_handler,omitempty"` + // SNI match resource. The structure is documented below. + // +kubebuilder:validation:Optional + SniHandler []SniHandlerParameters `json:"sniHandler,omitempty" tf:"sni_handler,omitempty"` } - type LoadBalancerInitParameters struct { + // Allocation zones for the Load Balancer instance. The structure is documented below. + AllocationPolicy []AllocationPolicyInitParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` -// Allocation zones for the Load Balancer instance. The structure is documented below. -AllocationPolicy []AllocationPolicyInitParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` - -// An optional description of the Load Balancer. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the Load Balancer. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this Load Balancer. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this Load Balancer. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// List of listeners for the Load Balancer. The structure is documented below. -Listener []ListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` + // List of listeners for the Load Balancer. The structure is documented below. + Listener []ListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` -// Cloud Logging settings. The structure is documented below. -LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Cloud Logging settings. The structure is documented below. + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Name of the Load Balancer. Provided by the client when the Load Balancer is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Load Balancer. Provided by the client when the Load Balancer is created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network that the Load Balancer is located at. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network that the Load Balancer is located at. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` -// ID of the region that the Load Balancer is located at. -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region that the Load Balancer is located at. + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// A list of ID's of security groups attached to the Load Balancer. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A list of ID's of security groups attached to the Load Balancer. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` } - type LoadBalancerObservation struct { + // Allocation zones for the Load Balancer instance. The structure is documented below. + AllocationPolicy []AllocationPolicyObservation `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` -// Allocation zones for the Load Balancer instance. The structure is documented below. -AllocationPolicy []AllocationPolicyObservation `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` - -// The Load Balancer creation timestamp. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // The Load Balancer creation timestamp. 
+ CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// An optional description of the Load Balancer. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the Load Balancer. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the Load Balancer. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The ID of the Load Balancer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Labels to assign to this Load Balancer. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this Load Balancer. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// List of listeners for the Load Balancer. The structure is documented below. -Listener []ListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` + // List of listeners for the Load Balancer. The structure is documented below. + Listener []ListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` -// Cloud log group used by the Load Balancer to store access logs. -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Cloud log group used by the Load Balancer to store access logs. + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Cloud Logging settings. The structure is documented below. -LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Cloud Logging settings. The structure is documented below. + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Name of the Load Balancer. Provided by the client when the Load Balancer is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Load Balancer. Provided by the client when the Load Balancer is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network that the Load Balancer is located at. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network that the Load Balancer is located at. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// ID of the region that the Load Balancer is located at. -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region that the Load Balancer is located at. + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// A list of ID's of security groups attached to the Load Balancer. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A list of ID's of security groups attached to the Load Balancer. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// Status of the Load Balancer. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // Status of the Load Balancer. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` } - type LoadBalancerParameters struct { + // Allocation zones for the Load Balancer instance. The structure is documented below. + // +kubebuilder:validation:Optional + AllocationPolicy []AllocationPolicyParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` -// Allocation zones for the Load Balancer instance. The structure is documented below. -// +kubebuilder:validation:Optional -AllocationPolicy []AllocationPolicyParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` + // An optional description of the Load Balancer. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the Load Balancer. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Labels to assign to this Load Balancer. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Labels to assign to this Load Balancer. A list of key/value pairs. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // List of listeners for the Load Balancer. The structure is documented below. + // +kubebuilder:validation:Optional + Listener []ListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` -// List of listeners for the Load Balancer. The structure is documented below. -// +kubebuilder:validation:Optional -Listener []ListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` + // Cloud Logging settings. The structure is documented below. + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Cloud Logging settings. The structure is documented below. -// +kubebuilder:validation:Optional -LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Name of the Load Balancer. 
Provided by the client when the Load Balancer is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the Load Balancer. Provided by the client when the Load Balancer is created. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // ID of the network that the Load Balancer is located at. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// ID of the network that the Load Balancer is located at. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + // ID of the region that the Load Balancer is located at. + // +kubebuilder:validation:Optional + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// ID of the region that the Load Balancer is located at. -// +kubebuilder:validation:Optional -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // A list of ID's of security groups attached to the Load Balancer. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// A list of ID's of security groups attached to the Load Balancer. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` - -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` } - type LocationInitParameters struct { + // If set, will disable all L7 instances in the zone for request handling. 
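// ---------------------------------------------------------------------------
// Editorial sketch (not upjet-generated code): a minimal
// LoadBalancerInitParameters value wiring together the listener types defined
// above. Only fields declared in this file are used; the name, port, and
// network-reference literals are assumptions, and the address sub-block (whose
// fields are defined elsewhere in this file) is left empty for brevity.
func exampleLoadBalancer() LoadBalancerInitParameters {
	strPtr := func(s string) *string { return &s }
	port := 80.0
	toHTTPS := true
	return LoadBalancerInitParameters{
		Name:         strPtr("example-balancer"),
		NetworkIDRef: &v1.Reference{Name: "example-network"},
		Listener: []ListenerInitParameters{{
			Name: strPtr("http-listener"),
			Endpoint: []EndpointInitParameters{{
				Address: []AddressInitParameters{{}},
				Ports:   []*float64{&port},
			}},
			HTTP: []HTTPInitParameters{{
				Redirects: []RedirectsInitParameters{{HTTPToHTTPS: &toHTTPS}},
			}},
		}},
	}
}
// ---------------------------------------------------------------------------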
+ DisableTraffic *bool `json:"disableTraffic,omitempty" tf:"disable_traffic,omitempty"` -// If set, will disable all L7 instances in the zone for request handling. -DisableTraffic *bool `json:"disableTraffic,omitempty" tf:"disable_traffic,omitempty"` + // ID of the subnet that location is located at. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the subnet that location is located at. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// ID of the zone that location is located at. -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // ID of the zone that location is located at. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type LocationObservation struct { + // If set, will disable all L7 instances in the zone for request handling. + DisableTraffic *bool `json:"disableTraffic,omitempty" tf:"disable_traffic,omitempty"` -// If set, will disable all L7 instances in the zone for request handling. -DisableTraffic *bool `json:"disableTraffic,omitempty" tf:"disable_traffic,omitempty"` - -// ID of the subnet that location is located at. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // ID of the subnet that location is located at. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the zone that location is located at. -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // ID of the zone that location is located at. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type LocationParameters struct { + // If set, will disable all L7 instances in the zone for request handling. + // +kubebuilder:validation:Optional + DisableTraffic *bool `json:"disableTraffic,omitempty" tf:"disable_traffic,omitempty"` -// If set, will disable all L7 instances in the zone for request handling. -// +kubebuilder:validation:Optional -DisableTraffic *bool `json:"disableTraffic,omitempty" tf:"disable_traffic,omitempty"` - -// ID of the subnet that location is located at. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // ID of the subnet that location is located at. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. 
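// ---------------------------------------------------------------------------
// Editorial sketch (not upjet-generated code): a single zonal location entry,
// resolving its subnet via a Crossplane reference rather than a literal ID;
// the zone and subnet names are assumptions.
func exampleLocation() LocationInitParameters {
	zone := "ru-central1-a"
	return LocationInitParameters{
		ZoneID:      &zone,
		SubnetIDRef: &v1.Reference{Name: "example-subnet"},
	}
}
// ---------------------------------------------------------------------------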
-// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// ID of the zone that location is located at. -// +kubebuilder:validation:Optional -ZoneID *string `json:"zoneId" tf:"zone_id,omitempty"` + // ID of the zone that location is located at. + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId" tf:"zone_id,omitempty"` } - type LogOptionsInitParameters struct { + // Set to true to disable Cloud Logging for the balancer + Disable *bool `json:"disable,omitempty" tf:"disable,omitempty"` -// Set to true to disable Cloud Logging for the balancer -Disable *bool `json:"disable,omitempty" tf:"disable,omitempty"` + // List of rules to discard a fraction of logs. The structure is documented below. + DiscardRule []DiscardRuleInitParameters `json:"discardRule,omitempty" tf:"discard_rule,omitempty"` -// List of rules to discard a fraction of logs. The structure is documented below. -DiscardRule []DiscardRuleInitParameters `json:"discardRule,omitempty" tf:"discard_rule,omitempty"` - -// Cloud Logging group ID to send logs to. Leave empty to use the balancer folder default log group. -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Cloud Logging group ID to send logs to. Leave empty to use the balancer folder default log group. + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` } - type LogOptionsObservation struct { + // Set to true to disable Cloud Logging for the balancer + Disable *bool `json:"disable,omitempty" tf:"disable,omitempty"` -// Set to true to disable Cloud Logging for the balancer -Disable *bool `json:"disable,omitempty" tf:"disable,omitempty"` - -// List of rules to discard a fraction of logs. The structure is documented below. -DiscardRule []DiscardRuleObservation `json:"discardRule,omitempty" tf:"discard_rule,omitempty"` + // List of rules to discard a fraction of logs. The structure is documented below. + DiscardRule []DiscardRuleObservation `json:"discardRule,omitempty" tf:"discard_rule,omitempty"` -// Cloud Logging group ID to send logs to. Leave empty to use the balancer folder default log group. -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Cloud Logging group ID to send logs to. Leave empty to use the balancer folder default log group. + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` } - type LogOptionsParameters struct { + // Set to true to disable Cloud Logging for the balancer + // +kubebuilder:validation:Optional + Disable *bool `json:"disable,omitempty" tf:"disable,omitempty"` -// Set to true to disable Cloud Logging for the balancer -// +kubebuilder:validation:Optional -Disable *bool `json:"disable,omitempty" tf:"disable,omitempty"` - -// List of rules to discard a fraction of logs. The structure is documented below. -// +kubebuilder:validation:Optional -DiscardRule []DiscardRuleParameters `json:"discardRule,omitempty" tf:"discard_rule,omitempty"` + // List of rules to discard a fraction of logs. 
The structure is documented below. + // +kubebuilder:validation:Optional + DiscardRule []DiscardRuleParameters `json:"discardRule,omitempty" tf:"discard_rule,omitempty"` -// Cloud Logging group ID to send logs to. Leave empty to use the balancer folder default log group. -// +kubebuilder:validation:Optional -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Cloud Logging group ID to send logs to. Leave empty to use the balancer folder default log group. + // +kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` } - type RedirectsInitParameters struct { - -// If set redirects all unencrypted HTTP requests to the same URI with scheme changed to https. -HTTPToHTTPS *bool `json:"httpToHttps,omitempty" tf:"http_to_https,omitempty"` + // If set redirects all unencrypted HTTP requests to the same URI with scheme changed to https. + HTTPToHTTPS *bool `json:"httpToHttps,omitempty" tf:"http_to_https,omitempty"` } - type RedirectsObservation struct { - -// If set redirects all unencrypted HTTP requests to the same URI with scheme changed to https. -HTTPToHTTPS *bool `json:"httpToHttps,omitempty" tf:"http_to_https,omitempty"` + // If set redirects all unencrypted HTTP requests to the same URI with scheme changed to https. + HTTPToHTTPS *bool `json:"httpToHttps,omitempty" tf:"http_to_https,omitempty"` } - type RedirectsParameters struct { - -// If set redirects all unencrypted HTTP requests to the same URI with scheme changed to https. -// +kubebuilder:validation:Optional -HTTPToHTTPS *bool `json:"httpToHttps,omitempty" tf:"http_to_https,omitempty"` + // If set redirects all unencrypted HTTP requests to the same URI with scheme changed to https. + // +kubebuilder:validation:Optional + HTTPToHTTPS *bool `json:"httpToHttps,omitempty" tf:"http_to_https,omitempty"` } - type SniHandlerHandlerInitParameters struct { + // Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. + // +listType=set + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` -// Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. -// +listType=set -CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` - -// HTTP handler resource. The structure is documented below. -HTTPHandler []HandlerHTTPHandlerInitParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` + // HTTP handler resource. The structure is documented below. + HTTPHandler []HandlerHTTPHandlerInitParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` -// Stream handler resource. The structure is documented below. -StreamHandler []HandlerStreamHandlerInitParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` + // Stream handler resource. The structure is documented below. + StreamHandler []HandlerStreamHandlerInitParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` } - type SniHandlerHandlerObservation struct { + // Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. 
+ // +listType=set + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` -// Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. -// +listType=set -CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` - -// HTTP handler resource. The structure is documented below. -HTTPHandler []HandlerHTTPHandlerObservation `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` + // HTTP handler resource. The structure is documented below. + HTTPHandler []HandlerHTTPHandlerObservation `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` -// Stream handler resource. The structure is documented below. -StreamHandler []HandlerStreamHandlerObservation `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` + // Stream handler resource. The structure is documented below. + StreamHandler []HandlerStreamHandlerObservation `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` } - type SniHandlerHandlerParameters struct { + // Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. + // +kubebuilder:validation:Optional + // +listType=set + CertificateIds []*string `json:"certificateIds" tf:"certificate_ids,omitempty"` -// Certificate IDs in the Certificate Manager. Multiple TLS certificates can be associated with the same context to allow both RSA and ECDSA certificates. Only the first certificate of each type will be used. -// +kubebuilder:validation:Optional -// +listType=set -CertificateIds []*string `json:"certificateIds" tf:"certificate_ids,omitempty"` - -// HTTP handler resource. The structure is documented below. -// +kubebuilder:validation:Optional -HTTPHandler []HandlerHTTPHandlerParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` + // HTTP handler resource. The structure is documented below. + // +kubebuilder:validation:Optional + HTTPHandler []HandlerHTTPHandlerParameters `json:"httpHandler,omitempty" tf:"http_handler,omitempty"` -// Stream handler resource. The structure is documented below. -// +kubebuilder:validation:Optional -StreamHandler []HandlerStreamHandlerParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` + // Stream handler resource. The structure is documented below. + // +kubebuilder:validation:Optional + StreamHandler []HandlerStreamHandlerParameters `json:"streamHandler,omitempty" tf:"stream_handler,omitempty"` } - type SniHandlerInitParameters struct { + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + Handler []SniHandlerHandlerInitParameters `json:"handler,omitempty" tf:"handler,omitempty"` -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -Handler []SniHandlerHandlerInitParameters `json:"handler,omitempty" tf:"handler,omitempty"` + // name of SNI match. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// name of SNI match. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// A set of server names. -// +listType=set -ServerNames []*string `json:"serverNames,omitempty" tf:"server_names,omitempty"` + // A set of server names. 
+ // +listType=set + ServerNames []*string `json:"serverNames,omitempty" tf:"server_names,omitempty"` } - type SniHandlerObservation struct { + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + Handler []SniHandlerHandlerObservation `json:"handler,omitempty" tf:"handler,omitempty"` -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -Handler []SniHandlerHandlerObservation `json:"handler,omitempty" tf:"handler,omitempty"` - -// name of SNI match. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // name of SNI match. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// A set of server names. -// +listType=set -ServerNames []*string `json:"serverNames,omitempty" tf:"server_names,omitempty"` + // A set of server names. + // +listType=set + ServerNames []*string `json:"serverNames,omitempty" tf:"server_names,omitempty"` } - type SniHandlerParameters struct { + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + // +kubebuilder:validation:Optional + Handler []SniHandlerHandlerParameters `json:"handler" tf:"handler,omitempty"` -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -// +kubebuilder:validation:Optional -Handler []SniHandlerHandlerParameters `json:"handler" tf:"handler,omitempty"` - -// name of SNI match. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // name of SNI match. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// A set of server names. -// +kubebuilder:validation:Optional -// +listType=set -ServerNames []*string `json:"serverNames" tf:"server_names,omitempty"` + // A set of server names. + // +kubebuilder:validation:Optional + // +listType=set + ServerNames []*string `json:"serverNames" tf:"server_names,omitempty"` } - type StreamHandlerInitParameters struct { - -// Backend group id. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type StreamHandlerObservation struct { - -// Backend group id. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type StreamHandlerParameters struct { - -// Backend group id. -// +kubebuilder:validation:Optional -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group id. + // +kubebuilder:validation:Optional + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` } - type StreamInitParameters struct { - -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -Handler []StreamHandlerInitParameters `json:"handler,omitempty" tf:"handler,omitempty"` + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + Handler []StreamHandlerInitParameters `json:"handler,omitempty" tf:"handler,omitempty"` } - type StreamObservation struct { - -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -Handler []StreamHandlerObservation `json:"handler,omitempty" tf:"handler,omitempty"` + // Stream handler that sets plaintext Stream backend group. 
The structure is documented below. + Handler []StreamHandlerObservation `json:"handler,omitempty" tf:"handler,omitempty"` } - type StreamParameters struct { - -// Stream handler that sets plaintext Stream backend group. The structure is documented below. -// +kubebuilder:validation:Optional -Handler []StreamHandlerParameters `json:"handler,omitempty" tf:"handler,omitempty"` + // Stream handler that sets plaintext Stream backend group. The structure is documented below. + // +kubebuilder:validation:Optional + Handler []StreamHandlerParameters `json:"handler,omitempty" tf:"handler,omitempty"` } // LoadBalancerSpec defines the desired state of LoadBalancer type LoadBalancerSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider LoadBalancerParameters `json:"forProvider"` + ForProvider LoadBalancerParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -1245,20 +1076,19 @@ type LoadBalancerSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider LoadBalancerInitParameters `json:"initProvider,omitempty"` + InitProvider LoadBalancerInitParameters `json:"initProvider,omitempty"` } // LoadBalancerStatus defines the observed state of LoadBalancer. type LoadBalancerStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider LoadBalancerObservation `json:"atProvider,omitempty"` + AtProvider LoadBalancerObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // LoadBalancer is the Schema for the LoadBalancers API. A Load Balancer is used for receiving incoming traffic and transmitting it to the backend endpoints specified in the ALB Target Groups. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -1268,9 +1098,9 @@ type LoadBalancerStatus struct { type LoadBalancer struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.allocationPolicy) || (has(self.initProvider) && has(self.initProvider.allocationPolicy))",message="spec.forProvider.allocationPolicy is a required parameter" - Spec LoadBalancerSpec `json:"spec"` - Status LoadBalancerStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.allocationPolicy) || (has(self.initProvider) && has(self.initProvider.allocationPolicy))",message="spec.forProvider.allocationPolicy is a required parameter" + Spec LoadBalancerSpec `json:"spec"` + Status LoadBalancerStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/alb/v1alpha1/zz_targetgroup_terraformed.go b/apis/alb/v1alpha1/zz_targetgroup_terraformed.go index ed53d8b..9f276f0 100755 --- a/apis/alb/v1alpha1/zz_targetgroup_terraformed.go +++ b/apis/alb/v1alpha1/zz_targetgroup_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
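// A minimal sketch, assuming the generated TargetGroup type from this file,
// of how the accessor methods below round-trip spec fields through upjet's
// Terraform-aware JSON parser: GetParameters flattens spec.forProvider into a
// generic attribute map and SetParameters writes such a map back. The helper
// name is hypothetical and the flow is illustrative, not the controller's
// actual code.
func roundTripTargetGroupParameters(tr *TargetGroup) error {
	params, err := tr.GetParameters() // spec.forProvider -> map[string]any
	if err != nil {
		return err
	}
	// A controller would reconcile params against Terraform state here.
	return tr.SetParameters(params) // map[string]any -> spec.forProvider
}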
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this TargetGroup func (mg *TargetGroup) GetTerraformResourceType() string { - return "yandex_alb_target_group" + return "yandex_alb_target_group" } // GetConnectionDetailsMapping for this TargetGroup func (tr *TargetGroup) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this TargetGroup func (tr *TargetGroup) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this TargetGroup func (tr *TargetGroup) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this TargetGroup func (tr *TargetGroup) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this TargetGroup func (tr *TargetGroup) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this TargetGroup func (tr *TargetGroup) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this TargetGroup func (tr *TargetGroup) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this TargetGroup func (tr *TargetGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this TargetGroup using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *TargetGroup) LateInitialize(attrs []byte) (bool, error) { - params := &TargetGroupParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &TargetGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *TargetGroup) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/alb/v1alpha1/zz_targetgroup_types.go b/apis/alb/v1alpha1/zz_targetgroup_types.go index 851eaa9..b2d8246 100755 --- a/apis/alb/v1alpha1/zz_targetgroup_types.go +++ b/apis/alb/v1alpha1/zz_targetgroup_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,167 +7,152 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type TargetGroupInitParameters struct { + // An optional description of the target group. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the target group. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// The ID of the folder to which the resource belongs. 
If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this target group. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this target group. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the target group. Provided by the client when the target group is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the target group. Provided by the client when the target group is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// A Target resource. The structure is documented below. -Target []TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` + // A Target resource. The structure is documented below. + Target []TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` } - type TargetGroupObservation struct { + // The target group creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// The target group creation timestamp. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` - -// An optional description of the target group. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the target group. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the target group. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The ID of the target group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Labels to assign to this target group. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this target group. A list of key/value pairs. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the target group. Provided by the client when the target group is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the target group. Provided by the client when the target group is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// A Target resource. The structure is documented below. -Target []TargetObservation `json:"target,omitempty" tf:"target,omitempty"` + // A Target resource. The structure is documented below. + Target []TargetObservation `json:"target,omitempty" tf:"target,omitempty"` } - type TargetGroupParameters struct { + // An optional description of the target group. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the target group. Provide this property when you create the resource. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this target group. A list of key/value pairs. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this target group. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the target group. Provided by the client when the target group is created. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the target group. Provided by the client when the target group is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// A Target resource. The structure is documented below. -// +kubebuilder:validation:Optional -Target []TargetParameters `json:"target,omitempty" tf:"target,omitempty"` + // A Target resource. The structure is documented below. 
+ // +kubebuilder:validation:Optional + Target []TargetParameters `json:"target,omitempty" tf:"target,omitempty"` } - type TargetInitParameters struct { + // IP address of the target. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` -// IP address of the target. -IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + PrivateIPv4Address *bool `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` -PrivateIPv4Address *bool `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` + // ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type TargetObservation struct { + // IP address of the target. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` -// IP address of the target. -IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + PrivateIPv4Address *bool `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` -PrivateIPv4Address *bool `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` - -// ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type TargetParameters struct { + // IP address of the target. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress" tf:"ip_address,omitempty"` -// IP address of the target. -// +kubebuilder:validation:Optional -IPAddress *string `json:"ipAddress" tf:"ip_address,omitempty"` + // +kubebuilder:validation:Optional + PrivateIPv4Address *bool `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` -// +kubebuilder:validation:Optional -PrivateIPv4Address *bool `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` + // ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } // TargetGroupSpec defines the desired state of TargetGroup type TargetGroupSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider TargetGroupParameters `json:"forProvider"` + ForProvider TargetGroupParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -180,20 +163,19 @@ type TargetGroupSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider TargetGroupInitParameters `json:"initProvider,omitempty"` + InitProvider TargetGroupInitParameters `json:"initProvider,omitempty"` } // TargetGroupStatus defines the observed state of TargetGroup. type TargetGroupStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider TargetGroupObservation `json:"atProvider,omitempty"` + AtProvider TargetGroupObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // TargetGroup is the Schema for the TargetGroups API. An application load balancer distributes the load across cloud resources that are combined into a target group. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/alb/v1alpha1/zz_virtualhost_terraformed.go b/apis/alb/v1alpha1/zz_virtualhost_terraformed.go index 7de24f6..462a991 100755 --- a/apis/alb/v1alpha1/zz_virtualhost_terraformed.go +++ b/apis/alb/v1alpha1/zz_virtualhost_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
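// A small standalone sketch of the merge semantics implemented by
// GetMergedParameters below, assuming the same mergo package this file
// already uses; the maps and values are hypothetical. mergo.WithSliceDeepCopy
// deep-copies slices from initProvider into forProvider, but it also flips
// Overwrite to true, so the extra config function resets Overwrite to false
// and existing forProvider values win.
func mergeParametersSketch() (map[string]any, error) {
	forProvider := map[string]any{"name": "from-for-provider"}
	initProvider := map[string]any{"name": "ignored", "labels": map[string]any{"env": "dev"}}
	err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	// forProvider["name"] stays "from-for-provider"; "labels" is filled in
	// from initProvider because forProvider did not set it.
	return forProvider, err
}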
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this VirtualHost func (mg *VirtualHost) GetTerraformResourceType() string { - return "yandex_alb_virtual_host" + return "yandex_alb_virtual_host" } // GetConnectionDetailsMapping for this VirtualHost func (tr *VirtualHost) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this VirtualHost func (tr *VirtualHost) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this VirtualHost func (tr *VirtualHost) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this VirtualHost func (tr *VirtualHost) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this VirtualHost func (tr *VirtualHost) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this VirtualHost func (tr *VirtualHost) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this VirtualHost func (tr *VirtualHost) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this VirtualHost func (tr *VirtualHost) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this VirtualHost using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *VirtualHost) LateInitialize(attrs []byte) (bool, error) { - params := &VirtualHostParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &VirtualHostParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *VirtualHost) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/alb/v1alpha1/zz_virtualhost_types.go b/apis/alb/v1alpha1/zz_virtualhost_types.go index 821b319..6e835f1 100755 --- a/apis/alb/v1alpha1/zz_virtualhost_types.go +++ b/apis/alb/v1alpha1/zz_virtualhost_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,1271 +7,1092 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AndPrincipalsHeaderValueInitParameters struct { + // Match exactly. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// Match exactly. -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + // Match prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Match prefix. -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -// Match regex. -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // Match regex. 
+ Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type AndPrincipalsHeaderValueObservation struct { + // Match exactly. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// Match exactly. -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -// Match prefix. -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Match prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Match regex. -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // Match regex. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type AndPrincipalsHeaderValueParameters struct { + // Match exactly. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// Match exactly. -// +kubebuilder:validation:Optional -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -// Match prefix. -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Match prefix. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Match regex. -// +kubebuilder:validation:Optional -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // Match regex. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type DirectResponseActionInitParameters struct { + // Response body text. + Body *string `json:"body,omitempty" tf:"body,omitempty"` -// Response body text. -Body *string `json:"body,omitempty" tf:"body,omitempty"` - -// The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. -Status *float64 `json:"status,omitempty" tf:"status,omitempty"` + // The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. + Status *float64 `json:"status,omitempty" tf:"status,omitempty"` } - type DirectResponseActionObservation struct { + // Response body text. + Body *string `json:"body,omitempty" tf:"body,omitempty"` -// Response body text. -Body *string `json:"body,omitempty" tf:"body,omitempty"` - -// The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. -Status *float64 `json:"status,omitempty" tf:"status,omitempty"` + // The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. + Status *float64 `json:"status,omitempty" tf:"status,omitempty"` } - type DirectResponseActionParameters struct { + // Response body text. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` -// Response body text. -// +kubebuilder:validation:Optional -Body *string `json:"body,omitempty" tf:"body,omitempty"` - -// The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. -// +kubebuilder:validation:Optional -Status *float64 `json:"status,omitempty" tf:"status,omitempty"` + // The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable.
+ // +kubebuilder:validation:Optional + Status *float64 `json:"status,omitempty" tf:"status,omitempty"` } - type FqmnInitParameters struct { + // Match exactly. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// Match exactly. -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + // Match prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Match prefix. -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -// Match regex. -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // Match regex. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type FqmnObservation struct { + // Match exactly. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// Match exactly. -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -// Match prefix. -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Match prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Match regex. -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // Match regex. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type FqmnParameters struct { + // Match exactly. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` -// Match exactly. -// +kubebuilder:validation:Optional -Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` - -// Match prefix. -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Match prefix. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Match regex. -// +kubebuilder:validation:Optional -Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + // Match regex. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` } - type GRPCMatchInitParameters struct { - -// If not set, all services/methods are assumed. The structure is documented below. -Fqmn []FqmnInitParameters `json:"fqmn,omitempty" tf:"fqmn,omitempty"` + // If not set, all services/methods are assumed. The structure is documented below. + Fqmn []FqmnInitParameters `json:"fqmn,omitempty" tf:"fqmn,omitempty"` } - type GRPCMatchObservation struct { - -// If not set, all services/methods are assumed. The structure is documented below. -Fqmn []FqmnObservation `json:"fqmn,omitempty" tf:"fqmn,omitempty"` + // If not set, all services/methods are assumed. The structure is documented below. + Fqmn []FqmnObservation `json:"fqmn,omitempty" tf:"fqmn,omitempty"` } - type GRPCMatchParameters struct { - -// If not set, all services/methods are assumed. The structure is documented below. -// +kubebuilder:validation:Optional -Fqmn []FqmnParameters `json:"fqmn,omitempty" tf:"fqmn,omitempty"` + // If not set, all services/methods are assumed. The structure is documented below. + // +kubebuilder:validation:Optional + Fqmn []FqmnParameters `json:"fqmn,omitempty" tf:"fqmn,omitempty"` } - type GRPCRouteActionInitParameters struct { + // If set, will automatically rewrite host. + AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"` -// If set, will automatically rewrite host. -AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"` - -// Backend group to route requests. 
-// +crossplane:generate:reference:type=BackendGroup -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group to route requests. + // +crossplane:generate:reference:type=BackendGroup + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` -// Reference to a BackendGroup to populate backendGroupId. -// +kubebuilder:validation:Optional -BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"` + // Reference to a BackendGroup to populate backendGroupId. + // +kubebuilder:validation:Optional + BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"` -// Selector for a BackendGroup to populate backendGroupId. -// +kubebuilder:validation:Optional -BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"` + // Selector for a BackendGroup to populate backendGroupId. + // +kubebuilder:validation:Optional + BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"` -// Host rewrite specifier. -HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"` + // Host rewrite specifier. + HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"` -// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout. -IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + // Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout. + IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` -// Lower timeout may be specified by the client (using grpc-timeout header). If not set, default is 60 seconds. -MaxTimeout *string `json:"maxTimeout,omitempty" tf:"max_timeout,omitempty"` + // Lower timeout may be specified by the client (using grpc-timeout header). If not set, default is 60 seconds. + MaxTimeout *string `json:"maxTimeout,omitempty" tf:"max_timeout,omitempty"` } - type GRPCRouteActionObservation struct { + // If set, will automatically rewrite host. + AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"` -// If set, will automatically rewrite host. -AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"` + // Backend group to route requests. + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` -// Backend group to route requests. -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Host rewrite specifier. + HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"` -// Host rewrite specifier. -HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"` + // Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. 
long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout. + IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` -// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout. -IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` - -// Lower timeout may be specified by the client (using grpc-timeout header). If not set, default is 60 seconds. -MaxTimeout *string `json:"maxTimeout,omitempty" tf:"max_timeout,omitempty"` + // Lower timeout may be specified by the client (using grpc-timeout header). If not set, default is 60 seconds. + MaxTimeout *string `json:"maxTimeout,omitempty" tf:"max_timeout,omitempty"` } - type GRPCRouteActionParameters struct { + // If set, will automatically rewrite host. + // +kubebuilder:validation:Optional + AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"` -// If set, will automatically rewrite host. -// +kubebuilder:validation:Optional -AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"` - -// Backend group to route requests. -// +crossplane:generate:reference:type=BackendGroup -// +kubebuilder:validation:Optional -BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` + // Backend group to route requests. + // +crossplane:generate:reference:type=BackendGroup + // +kubebuilder:validation:Optional + BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"` -// Reference to a BackendGroup to populate backendGroupId. -// +kubebuilder:validation:Optional -BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"` + // Reference to a BackendGroup to populate backendGroupId. + // +kubebuilder:validation:Optional + BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"` -// Selector for a BackendGroup to populate backendGroupId. -// +kubebuilder:validation:Optional -BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"` + // Selector for a BackendGroup to populate backendGroupId. + // +kubebuilder:validation:Optional + BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"` -// Host rewrite specifier. -// +kubebuilder:validation:Optional -HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"` + // Host rewrite specifier. + // +kubebuilder:validation:Optional + HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"` -// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout. -// +kubebuilder:validation:Optional -IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + // Specifies the idle timeout (time without any data transfer for the active request) for the route. 
It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout. + // +kubebuilder:validation:Optional + IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` -// Lower timeout may be specified by the client (using grpc-timeout header). If not set, default is 60 seconds. -// +kubebuilder:validation:Optional -MaxTimeout *string `json:"maxTimeout,omitempty" tf:"max_timeout,omitempty"` + // Lower timeout may be specified by the client (using grpc-timeout header). If not set, default is 60 seconds. + // +kubebuilder:validation:Optional + MaxTimeout *string `json:"maxTimeout,omitempty" tf:"max_timeout,omitempty"` } - type GRPCRouteInitParameters struct { + // Checks "/" prefix by default. The structure is documented below. + GRPCMatch []GRPCMatchInitParameters `json:"grpcMatch,omitempty" tf:"grpc_match,omitempty"` -// Checks "/" prefix by default. The structure is documented below. -GRPCMatch []GRPCMatchInitParameters `json:"grpcMatch,omitempty" tf:"grpc_match,omitempty"` - -// GRPC route action resource. The structure is documented below. -GRPCRouteAction []GRPCRouteActionInitParameters `json:"grpcRouteAction,omitempty" tf:"grpc_route_action,omitempty"` + // GRPC route action resource. The structure is documented below. + GRPCRouteAction []GRPCRouteActionInitParameters `json:"grpcRouteAction,omitempty" tf:"grpc_route_action,omitempty"` -// GRPC status response action resource. The structure is documented below. -GRPCStatusResponseAction []GRPCStatusResponseActionInitParameters `json:"grpcStatusResponseAction,omitempty" tf:"grpc_status_response_action,omitempty"` + // GRPC status response action resource. The structure is documented below. + GRPCStatusResponseAction []GRPCStatusResponseActionInitParameters `json:"grpcStatusResponseAction,omitempty" tf:"grpc_status_response_action,omitempty"` } - type GRPCRouteObservation struct { + // Checks "/" prefix by default. The structure is documented below. + GRPCMatch []GRPCMatchObservation `json:"grpcMatch,omitempty" tf:"grpc_match,omitempty"` -// Checks "/" prefix by default. The structure is documented below. -GRPCMatch []GRPCMatchObservation `json:"grpcMatch,omitempty" tf:"grpc_match,omitempty"` + // GRPC route action resource. The structure is documented below. + GRPCRouteAction []GRPCRouteActionObservation `json:"grpcRouteAction,omitempty" tf:"grpc_route_action,omitempty"` -// GRPC route action resource. The structure is documented below. -GRPCRouteAction []GRPCRouteActionObservation `json:"grpcRouteAction,omitempty" tf:"grpc_route_action,omitempty"` - -// GRPC status response action resource. The structure is documented below. -GRPCStatusResponseAction []GRPCStatusResponseActionObservation `json:"grpcStatusResponseAction,omitempty" tf:"grpc_status_response_action,omitempty"` + // GRPC status response action resource. The structure is documented below. + GRPCStatusResponseAction []GRPCStatusResponseActionObservation `json:"grpcStatusResponseAction,omitempty" tf:"grpc_status_response_action,omitempty"` } - type GRPCRouteParameters struct { + // Checks "/" prefix by default. The structure is documented below. + // +kubebuilder:validation:Optional + GRPCMatch []GRPCMatchParameters `json:"grpcMatch,omitempty" tf:"grpc_match,omitempty"` -// Checks "/" prefix by default. The structure is documented below. 
-// +kubebuilder:validation:Optional -GRPCMatch []GRPCMatchParameters `json:"grpcMatch,omitempty" tf:"grpc_match,omitempty"` - -// GRPC route action resource. The structure is documented below. -// +kubebuilder:validation:Optional -GRPCRouteAction []GRPCRouteActionParameters `json:"grpcRouteAction,omitempty" tf:"grpc_route_action,omitempty"` + // GRPC route action resource. The structure is documented below. + // +kubebuilder:validation:Optional + GRPCRouteAction []GRPCRouteActionParameters `json:"grpcRouteAction,omitempty" tf:"grpc_route_action,omitempty"` -// GRPC status response action resource. The structure is documented below. -// +kubebuilder:validation:Optional -GRPCStatusResponseAction []GRPCStatusResponseActionParameters `json:"grpcStatusResponseAction,omitempty" tf:"grpc_status_response_action,omitempty"` + // GRPC status response action resource. The structure is documented below. + // +kubebuilder:validation:Optional + GRPCStatusResponseAction []GRPCStatusResponseActionParameters `json:"grpcStatusResponseAction,omitempty" tf:"grpc_status_response_action,omitempty"` } - type GRPCStatusResponseActionInitParameters struct { - -// The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. + Status *string `json:"status,omitempty" tf:"status,omitempty"` } - type GRPCStatusResponseActionObservation struct { - -// The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. + Status *string `json:"status,omitempty" tf:"status,omitempty"` } - type GRPCStatusResponseActionParameters struct { - -// The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. -// +kubebuilder:validation:Optional -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // The status of the response. Supported values are: ok, invalid_argument, not_found, permission_denied, unauthenticated, unimplemented, internal, unavailable. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` } - type HTTPMatchInitParameters struct { + // List of methods(strings). + // +listType=set + HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -// List of methods(strings). -// +listType=set -HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` - -// If not set, '/' is assumed. The structure is documented below. -Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + // If not set, '/' is assumed. The structure is documented below. + Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` } - type HTTPMatchObservation struct { + // List of methods(strings). + // +listType=set + HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -// List of methods(strings).
 
-
 type HTTPMatchInitParameters struct {
+	// List of methods (strings).
+	// +listType=set
+	HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"`
 
-// List of methods(strings).
-// +listType=set
-HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"`
-
-// If not set, '/' is assumed. The structure is documented below.
-Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"`
+	// If not set, '/' is assumed. The structure is documented below.
+	Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"`
 }
 
-
 type HTTPMatchObservation struct {
+	// List of methods (strings).
+	// +listType=set
+	HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"`
 
-// List of methods(strings).
-// +listType=set
-HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"`
-
-// If not set, '/' is assumed. The structure is documented below.
-Path []PathObservation `json:"path,omitempty" tf:"path,omitempty"`
+	// If not set, '/' is assumed. The structure is documented below.
+	Path []PathObservation `json:"path,omitempty" tf:"path,omitempty"`
 }
 
-
 type HTTPMatchParameters struct {
+	// List of methods (strings).
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"`
 
-// List of methods(strings).
-// +kubebuilder:validation:Optional
-// +listType=set
-HTTPMethod []*string `json:"httpMethod,omitempty" tf:"http_method,omitempty"`
-
-// If not set, '/' is assumed. The structure is documented below.
-// +kubebuilder:validation:Optional
-Path []PathParameters `json:"path,omitempty" tf:"path,omitempty"`
+	// If not set, '/' is assumed. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Path []PathParameters `json:"path,omitempty" tf:"path,omitempty"`
 }
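A sketch of an HTTP match restricting both method and path prefix (ptr as above; omitting Path falls back to the '/' prefix, per the comment):

    match := HTTPMatchInitParameters{
        HTTPMethod: []*string{ptr("GET"), ptr("POST")},
        Path:       []PathInitParameters{{Prefix: ptr("/api/")}},
    }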
 
-
 type HTTPRouteActionInitParameters struct {
+	// If set, will automatically rewrite host.
+	AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"`
 
-// If set, will automatically rewrite host.
-AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"`
-
-// Backend group to route requests.
-// +crossplane:generate:reference:type=BackendGroup
-BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"`
+	// Backend group to route requests.
+	// +crossplane:generate:reference:type=BackendGroup
+	BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"`
 
-// Reference to a BackendGroup to populate backendGroupId.
-// +kubebuilder:validation:Optional
-BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"`
+	// Reference to a BackendGroup to populate backendGroupId.
+	// +kubebuilder:validation:Optional
+	BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"`
 
-// Selector for a BackendGroup to populate backendGroupId.
-// +kubebuilder:validation:Optional
-BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"`
+	// Selector for a BackendGroup to populate backendGroupId.
+	// +kubebuilder:validation:Optional
+	BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"`
 
-// Host rewrite specifier.
-HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"`
+	// Host rewrite specifier.
+	HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"`
 
-// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout.
-IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"`
+	// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (e.g. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout.
+	IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"`
 
-// If not empty, matched path prefix will be replaced by this value.
-PrefixRewrite *string `json:"prefixRewrite,omitempty" tf:"prefix_rewrite,omitempty"`
+	// If not empty, matched path prefix will be replaced by this value.
+	PrefixRewrite *string `json:"prefixRewrite,omitempty" tf:"prefix_rewrite,omitempty"`
 
-// Specifies the request timeout (overall time request processing is allowed to take) for the route. If not set, default is 60 seconds.
-Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"`
+	// Specifies the request timeout (overall time request processing is allowed to take) for the route. If not set, default is 60 seconds.
+	Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"`
 
-// List of upgrade types. Only specified upgrade types will be allowed. For example, "websocket".
-// +listType=set
-UpgradeTypes []*string `json:"upgradeTypes,omitempty" tf:"upgrade_types,omitempty"`
+	// List of upgrade types. Only specified upgrade types will be allowed. For example, "websocket".
+	// +listType=set
+	UpgradeTypes []*string `json:"upgradeTypes,omitempty" tf:"upgrade_types,omitempty"`
 }
 
-
 type HTTPRouteActionObservation struct {
+	// If set, will automatically rewrite host.
+	AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"`
 
-// If set, will automatically rewrite host.
-AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"`
-
-// Backend group to route requests.
-BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"`
+	// Backend group to route requests.
+	BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"`
 
-// Host rewrite specifier.
-HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"`
+	// Host rewrite specifier.
+	HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"`
 
-// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout.
-IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"`
+	// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (e.g. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout.
+	IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"`
 
-// If not empty, matched path prefix will be replaced by this value.
-PrefixRewrite *string `json:"prefixRewrite,omitempty" tf:"prefix_rewrite,omitempty"`
+	// If not empty, matched path prefix will be replaced by this value.
+	PrefixRewrite *string `json:"prefixRewrite,omitempty" tf:"prefix_rewrite,omitempty"`
 
-// Specifies the request timeout (overall time request processing is allowed to take) for the route. If not set, default is 60 seconds.
-Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"`
+	// Specifies the request timeout (overall time request processing is allowed to take) for the route. If not set, default is 60 seconds.
+	Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"`
 
-// List of upgrade types. Only specified upgrade types will be allowed. For example, "websocket".
-// +listType=set
-UpgradeTypes []*string `json:"upgradeTypes,omitempty" tf:"upgrade_types,omitempty"`
+	// List of upgrade types. Only specified upgrade types will be allowed. For example, "websocket".
+	// +listType=set
+	UpgradeTypes []*string `json:"upgradeTypes,omitempty" tf:"upgrade_types,omitempty"`
 }
 
-
 type HTTPRouteActionParameters struct {
+	// If set, will automatically rewrite host.
+	// +kubebuilder:validation:Optional
+	AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"`
 
-// If set, will automatically rewrite host.
-// +kubebuilder:validation:Optional
-AutoHostRewrite *bool `json:"autoHostRewrite,omitempty" tf:"auto_host_rewrite,omitempty"`
+	// Backend group to route requests.
+	// +crossplane:generate:reference:type=BackendGroup
+	// +kubebuilder:validation:Optional
+	BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"`
 
-// Backend group to route requests.
-// +crossplane:generate:reference:type=BackendGroup
-// +kubebuilder:validation:Optional
-BackendGroupID *string `json:"backendGroupId,omitempty" tf:"backend_group_id,omitempty"`
+	// Reference to a BackendGroup to populate backendGroupId.
+	// +kubebuilder:validation:Optional
+	BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"`
 
-// Reference to a BackendGroup to populate backendGroupId.
-// +kubebuilder:validation:Optional
-BackendGroupIDRef *v1.Reference `json:"backendGroupIdRef,omitempty" tf:"-"`
+	// Selector for a BackendGroup to populate backendGroupId.
+	// +kubebuilder:validation:Optional
+	BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"`
 
-// Selector for a BackendGroup to populate backendGroupId.
-// +kubebuilder:validation:Optional
-BackendGroupIDSelector *v1.Selector `json:"backendGroupIdSelector,omitempty" tf:"-"`
+	// Host rewrite specifier.
+	// +kubebuilder:validation:Optional
+	HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"`
 
-// Host rewrite specifier.
-// +kubebuilder:validation:Optional
-HostRewrite *string `json:"hostRewrite,omitempty" tf:"host_rewrite,omitempty"`
+	// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (e.g. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout.
+	// +kubebuilder:validation:Optional
+	IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"`
 
-// Specifies the idle timeout (time without any data transfer for the active request) for the route. It is useful for streaming scenarios (i.e. long-polling, server-sent events) - one should set idle_timeout to something meaningful and timeout to the maximum time the stream is allowed to be alive. If not specified, there is no per-route idle timeout.
-// +kubebuilder:validation:Optional
-IdleTimeout *string `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"`
+	// If not empty, matched path prefix will be replaced by this value.
+	// +kubebuilder:validation:Optional
+	PrefixRewrite *string `json:"prefixRewrite,omitempty" tf:"prefix_rewrite,omitempty"`
 
-// If not empty, matched path prefix will be replaced by this value.
-// +kubebuilder:validation:Optional
-PrefixRewrite *string `json:"prefixRewrite,omitempty" tf:"prefix_rewrite,omitempty"`
+	// Specifies the request timeout (overall time request processing is allowed to take) for the route. If not set, default is 60 seconds.
+	// +kubebuilder:validation:Optional
+	Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"`
 
-// Specifies the request timeout (overall time request processing is allowed to take) for the route. If not set, default is 60 seconds.
-// +kubebuilder:validation:Optional
-Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"`
-
-// List of upgrade types. Only specified upgrade types will be allowed. For example, "websocket".
-// +kubebuilder:validation:Optional
-// +listType=set
-UpgradeTypes []*string `json:"upgradeTypes,omitempty" tf:"upgrade_types,omitempty"`
+	// List of upgrade types. Only specified upgrade types will be allowed. For example, "websocket".
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	UpgradeTypes []*string `json:"upgradeTypes,omitempty" tf:"upgrade_types,omitempty"`
 }
 
-
 type HTTPRouteInitParameters struct {
+	// Direct response action resource. The structure is documented below.
+	DirectResponseAction []DirectResponseActionInitParameters `json:"directResponseAction,omitempty" tf:"direct_response_action,omitempty"`
 
-// Direct response action resource. The structure is documented below.
-DirectResponseAction []DirectResponseActionInitParameters `json:"directResponseAction,omitempty" tf:"direct_response_action,omitempty"`
-
-// Checks "/" prefix by default. The structure is documented below.
-HTTPMatch []HTTPMatchInitParameters `json:"httpMatch,omitempty" tf:"http_match,omitempty"`
+	// Checks "/" prefix by default. The structure is documented below.
+	HTTPMatch []HTTPMatchInitParameters `json:"httpMatch,omitempty" tf:"http_match,omitempty"`
 
-// HTTP route action resource. The structure is documented below.
-HTTPRouteAction []HTTPRouteActionInitParameters `json:"httpRouteAction,omitempty" tf:"http_route_action,omitempty"`
+	// HTTP route action resource. The structure is documented below.
+	HTTPRouteAction []HTTPRouteActionInitParameters `json:"httpRouteAction,omitempty" tf:"http_route_action,omitempty"`
 
-// Redirect action resource. The structure is documented below.
-RedirectAction []RedirectActionInitParameters `json:"redirectAction,omitempty" tf:"redirect_action,omitempty"`
+	// Redirect action resource. The structure is documented below.
+	RedirectAction []RedirectActionInitParameters `json:"redirectAction,omitempty" tf:"redirect_action,omitempty"`
 }
 
-
 type HTTPRouteObservation struct {
+	// Direct response action resource. The structure is documented below.
+	DirectResponseAction []DirectResponseActionObservation `json:"directResponseAction,omitempty" tf:"direct_response_action,omitempty"`
 
-// Direct response action resource. The structure is documented below.
-DirectResponseAction []DirectResponseActionObservation `json:"directResponseAction,omitempty" tf:"direct_response_action,omitempty"`
-
-// Checks "/" prefix by default. The structure is documented below.
-HTTPMatch []HTTPMatchObservation `json:"httpMatch,omitempty" tf:"http_match,omitempty"`
+	// Checks "/" prefix by default. The structure is documented below.
+	HTTPMatch []HTTPMatchObservation `json:"httpMatch,omitempty" tf:"http_match,omitempty"`
 
-// HTTP route action resource. The structure is documented below.
-HTTPRouteAction []HTTPRouteActionObservation `json:"httpRouteAction,omitempty" tf:"http_route_action,omitempty"`
+	// HTTP route action resource. The structure is documented below.
+	HTTPRouteAction []HTTPRouteActionObservation `json:"httpRouteAction,omitempty" tf:"http_route_action,omitempty"`
 
-// Redirect action resource. The structure is documented below.
-RedirectAction []RedirectActionObservation `json:"redirectAction,omitempty" tf:"redirect_action,omitempty"`
+	// Redirect action resource. The structure is documented below.
+	RedirectAction []RedirectActionObservation `json:"redirectAction,omitempty" tf:"redirect_action,omitempty"`
 }
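The backendGroupId / backendGroupIdRef / backendGroupIdSelector triple is the usual crossplane reference pattern: set the ID directly, or let it be resolved from another managed resource. A sketch (v1 is the crossplane-runtime common API package these types already use; the name is illustrative):

    action := HTTPRouteActionParameters{
        // Resolved at reconcile time from the named BackendGroup object.
        BackendGroupIDRef: &v1.Reference{Name: "example-backend-group"},
        Timeout:           ptr("30s"),
    }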
 
-
 type HTTPRouteParameters struct {
+	// Direct response action resource. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	DirectResponseAction []DirectResponseActionParameters `json:"directResponseAction,omitempty" tf:"direct_response_action,omitempty"`
 
-// Direct response action resource. The structure is documented below.
-// +kubebuilder:validation:Optional
-DirectResponseAction []DirectResponseActionParameters `json:"directResponseAction,omitempty" tf:"direct_response_action,omitempty"`
+	// Checks "/" prefix by default. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	HTTPMatch []HTTPMatchParameters `json:"httpMatch,omitempty" tf:"http_match,omitempty"`
 
-// Checks "/" prefix by default. The structure is documented below.
-// +kubebuilder:validation:Optional
-HTTPMatch []HTTPMatchParameters `json:"httpMatch,omitempty" tf:"http_match,omitempty"`
+	// HTTP route action resource. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	HTTPRouteAction []HTTPRouteActionParameters `json:"httpRouteAction,omitempty" tf:"http_route_action,omitempty"`
 
-// HTTP route action resource. The structure is documented below.
-// +kubebuilder:validation:Optional
-HTTPRouteAction []HTTPRouteActionParameters `json:"httpRouteAction,omitempty" tf:"http_route_action,omitempty"`
-
-// Redirect action resource. The structure is documented below.
-// +kubebuilder:validation:Optional
-RedirectAction []RedirectActionParameters `json:"redirectAction,omitempty" tf:"redirect_action,omitempty"`
+	// Redirect action resource. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	RedirectAction []RedirectActionParameters `json:"redirectAction,omitempty" tf:"redirect_action,omitempty"`
 }
 
-
 type HeaderValueInitParameters struct {
+	// Match exactly.
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
 
-// Match exactly.
-Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
-
-// Match prefix.
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Match prefix.
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
 
-// Match regex.
-Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+	// Match regex.
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
 }
 
-
 type HeaderValueObservation struct {
+	// Match exactly.
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
 
-// Match exactly.
-Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
-
-// Match prefix.
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Match prefix.
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
 
-// Match regex.
-Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+	// Match regex.
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
 }
 
-
 type HeaderValueParameters struct {
+	// Match exactly.
+	// +kubebuilder:validation:Optional
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
 
-// Match exactly.
-// +kubebuilder:validation:Optional
-Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
-
-// Match prefix.
-// +kubebuilder:validation:Optional
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Match prefix.
+	// +kubebuilder:validation:Optional
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
 
-// Match regex.
-// +kubebuilder:validation:Optional
-Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+	// Match regex.
+	// +kubebuilder:validation:Optional
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
 }
 
-
 type ModifyRequestHeadersInitParameters struct {
+	// Append string to the header value.
+	Append *string `json:"append,omitempty" tf:"append,omitempty"`
 
-// Append string to the header value.
-Append *string `json:"append,omitempty" tf:"append,omitempty"`
+	// name of the header to modify.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the header to modify.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// If set, remove the header.
+	Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
 
-// If set, remove the header.
-Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
-
-// New value for a header. Header values support the following formatters.
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+	// New value for a header. Header values support the following formatters.
+	Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
 }
 
-
 type ModifyRequestHeadersObservation struct {
+	// Append string to the header value.
+	Append *string `json:"append,omitempty" tf:"append,omitempty"`
 
-// Append string to the header value.
-Append *string `json:"append,omitempty" tf:"append,omitempty"`
-
-// name of the header to modify.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// name of the header to modify.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// If set, remove the header.
-Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
+	// If set, remove the header.
+	Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
 
-// New value for a header. Header values support the following formatters.
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+	// New value for a header. Header values support the following formatters.
+	Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
 }
 
-
 type ModifyRequestHeadersParameters struct {
+	// Append string to the header value.
+	// +kubebuilder:validation:Optional
+	Append *string `json:"append,omitempty" tf:"append,omitempty"`
 
-// Append string to the header value.
-// +kubebuilder:validation:Optional
-Append *string `json:"append,omitempty" tf:"append,omitempty"`
-
-// name of the header to modify.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+	// name of the header to modify.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 
-// If set, remove the header.
-// +kubebuilder:validation:Optional
-Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
+	// If set, remove the header.
+	// +kubebuilder:validation:Optional
+	Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
 
-// New value for a header. Header values support the following formatters.
-// +kubebuilder:validation:Optional
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+	// New value for a header. Header values support the following formatters.
+	// +kubebuilder:validation:Optional
+	Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
 }
 
-
 type ModifyResponseHeadersInitParameters struct {
+	// Append string to the header value.
+	Append *string `json:"append,omitempty" tf:"append,omitempty"`
 
-// Append string to the header value.
-Append *string `json:"append,omitempty" tf:"append,omitempty"`
+	// name of the header to modify.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// If set, remove the header.
+	Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
 
-// If set, remove the header.
-Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
-
-// New value for a header. Header values support the following formatters.
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+	// New value for a header. Header values support the following formatters.
+	Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
 }
 
-
 type ModifyResponseHeadersObservation struct {
+	// Append string to the header value.
+	Append *string `json:"append,omitempty" tf:"append,omitempty"`
 
-// Append string to the header value.
-Append *string `json:"append,omitempty" tf:"append,omitempty"`
+	// name of the header to modify.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// If set, remove the header.
+	Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
 
-// If set, remove the header.
-Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
-
-// New value for a header. Header values support the following formatters.
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+	// New value for a header. Header values support the following formatters.
+	Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
 }
 
-
 type ModifyResponseHeadersParameters struct {
+	// Append string to the header value.
+	// +kubebuilder:validation:Optional
+	Append *string `json:"append,omitempty" tf:"append,omitempty"`
 
-// Append string to the header value.
-// +kubebuilder:validation:Optional
-Append *string `json:"append,omitempty" tf:"append,omitempty"`
-
-// name of the route.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+	// name of the header to modify.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 
-// If set, remove the header.
-// +kubebuilder:validation:Optional
-Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
+	// If set, remove the header.
+	// +kubebuilder:validation:Optional
+	Remove *bool `json:"remove,omitempty" tf:"remove,omitempty"`
 
-// New value for a header. Header values support the following formatters.
-// +kubebuilder:validation:Optional
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+	// New value for a header. Header values support the following formatters.
+	// +kubebuilder:validation:Optional
+	Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
 }
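A sketch of request-header modifications; note that in the Parameters variant Name is json:"name" without omitempty, i.e. required whenever a block is present (values are illustrative):

    headers := []ModifyRequestHeadersParameters{
        {Name: ptr("X-Forwarded-Proto"), Replace: ptr("https")},
        {Name: ptr("X-Internal-Debug"), Remove: ptr(true)},
    }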
 
-
 type PathInitParameters struct {
+	// Match exactly.
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
 
-// Match exactly.
-Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
-
-// Match prefix.
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Match prefix.
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
 
-// Match regex.
-Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+	// Match regex.
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
 }
 
-
 type PathObservation struct {
+	// Match exactly.
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
 
-// Match exactly.
-Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
+	// Match prefix.
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
 
-// Match prefix.
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
-
-// Match regex.
-Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+	// Match regex.
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
 }
 
-
 type PathParameters struct {
+	// Match exactly.
+	// +kubebuilder:validation:Optional
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
 
-// Match exactly.
-// +kubebuilder:validation:Optional
-Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
-
-// Match prefix.
-// +kubebuilder:validation:Optional
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Match prefix.
+	// +kubebuilder:validation:Optional
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
 
-// Match regex.
-// +kubebuilder:validation:Optional
-Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+	// Match regex.
+	// +kubebuilder:validation:Optional
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
 }
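Exact, Prefix and Regex are alternative string matchers; presumably only one should be set per Path block (the underlying API models this kind of match as a oneof). A sketch:

    health := PathInitParameters{Exact: ptr("/healthz")}
    users := PathInitParameters{Regex: ptr("^/v[0-9]+/users/.*")}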
 
-
 type PrincipalsAndPrincipalsHeaderInitParameters struct {
+	// name of the header.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-Value []HeaderValueInitParameters `json:"value,omitempty" tf:"value,omitempty"`
+	Value []HeaderValueInitParameters `json:"value,omitempty" tf:"value,omitempty"`
 }
 
-
 type PrincipalsAndPrincipalsHeaderObservation struct {
+	// name of the header.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-Value []HeaderValueObservation `json:"value,omitempty" tf:"value,omitempty"`
+	Value []HeaderValueObservation `json:"value,omitempty" tf:"value,omitempty"`
 }
 
-
 type PrincipalsAndPrincipalsHeaderParameters struct {
+	// name of the header.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 
-// name of the route.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// +kubebuilder:validation:Optional
-Value []HeaderValueParameters `json:"value,omitempty" tf:"value,omitempty"`
+	// +kubebuilder:validation:Optional
+	Value []HeaderValueParameters `json:"value,omitempty" tf:"value,omitempty"`
 }
 
-
 type PrincipalsAndPrincipalsInitParameters struct {
+	Any *bool `json:"any,omitempty" tf:"any,omitempty"`
+
+	Header []PrincipalsAndPrincipalsHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"`
 
-Any *bool `json:"any,omitempty" tf:"any,omitempty"`
-
-Header []PrincipalsAndPrincipalsHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"`
-
-RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
+	RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
 }
 
-
 type PrincipalsAndPrincipalsObservation struct {
+	Any *bool `json:"any,omitempty" tf:"any,omitempty"`
+
+	Header []PrincipalsAndPrincipalsHeaderObservation `json:"header,omitempty" tf:"header,omitempty"`
 
-Any *bool `json:"any,omitempty" tf:"any,omitempty"`
-
-Header []PrincipalsAndPrincipalsHeaderObservation `json:"header,omitempty" tf:"header,omitempty"`
-
-RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
+	RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
 }
 
-
 type PrincipalsAndPrincipalsParameters struct {
+	// +kubebuilder:validation:Optional
+	Any *bool `json:"any,omitempty" tf:"any,omitempty"`
 
-// +kubebuilder:validation:Optional
-Any *bool `json:"any,omitempty" tf:"any,omitempty"`
-
-// +kubebuilder:validation:Optional
-Header []PrincipalsAndPrincipalsHeaderParameters `json:"header,omitempty" tf:"header,omitempty"`
+	// +kubebuilder:validation:Optional
+	Header []PrincipalsAndPrincipalsHeaderParameters `json:"header,omitempty" tf:"header,omitempty"`
 
-// +kubebuilder:validation:Optional
-RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
+	// +kubebuilder:validation:Optional
+	RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
 }
 
-
 type RbacPrincipalsAndPrincipalsHeaderInitParameters struct {
+	// name of the header.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-Value []AndPrincipalsHeaderValueInitParameters `json:"value,omitempty" tf:"value,omitempty"`
+	Value []AndPrincipalsHeaderValueInitParameters `json:"value,omitempty" tf:"value,omitempty"`
 }
 
-
 type RbacPrincipalsAndPrincipalsHeaderObservation struct {
+	// name of the header.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-Value []AndPrincipalsHeaderValueObservation `json:"value,omitempty" tf:"value,omitempty"`
+	Value []AndPrincipalsHeaderValueObservation `json:"value,omitempty" tf:"value,omitempty"`
 }
 
-
 type RbacPrincipalsAndPrincipalsHeaderParameters struct {
+	// name of the header.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 
-// name of the route.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// +kubebuilder:validation:Optional
-Value []AndPrincipalsHeaderValueParameters `json:"value,omitempty" tf:"value,omitempty"`
+	// +kubebuilder:validation:Optional
+	Value []AndPrincipalsHeaderValueParameters `json:"value,omitempty" tf:"value,omitempty"`
 }
 
-
 type RbacPrincipalsAndPrincipalsInitParameters struct {
+	Any *bool `json:"any,omitempty" tf:"any,omitempty"`
+
+	Header []RbacPrincipalsAndPrincipalsHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"`
 
-Any *bool `json:"any,omitempty" tf:"any,omitempty"`
-
-Header []RbacPrincipalsAndPrincipalsHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"`
-
-RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
+	RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
 }
 
-
 type RbacPrincipalsAndPrincipalsObservation struct {
+	Any *bool `json:"any,omitempty" tf:"any,omitempty"`
+
+	Header []RbacPrincipalsAndPrincipalsHeaderObservation `json:"header,omitempty" tf:"header,omitempty"`
 
-Any *bool `json:"any,omitempty" tf:"any,omitempty"`
-
-Header []RbacPrincipalsAndPrincipalsHeaderObservation `json:"header,omitempty" tf:"header,omitempty"`
-
-RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
+	RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
 }
 
-
 type RbacPrincipalsAndPrincipalsParameters struct {
+	// +kubebuilder:validation:Optional
+	Any *bool `json:"any,omitempty" tf:"any,omitempty"`
 
-// +kubebuilder:validation:Optional
-Any *bool `json:"any,omitempty" tf:"any,omitempty"`
-
-// +kubebuilder:validation:Optional
-Header []RbacPrincipalsAndPrincipalsHeaderParameters `json:"header,omitempty" tf:"header,omitempty"`
+	// +kubebuilder:validation:Optional
+	Header []RbacPrincipalsAndPrincipalsHeaderParameters `json:"header,omitempty" tf:"header,omitempty"`
 
-// +kubebuilder:validation:Optional
-RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
+	// +kubebuilder:validation:Optional
+	RemoteIP *string `json:"remoteIp,omitempty" tf:"remote_ip,omitempty"`
 }
 
-
 type RbacPrincipalsInitParameters struct {
-
-
-AndPrincipals []PrincipalsAndPrincipalsInitParameters `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
+	AndPrincipals []PrincipalsAndPrincipalsInitParameters `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
 }
 
-
 type RbacPrincipalsObservation struct {
-
-
-AndPrincipals []PrincipalsAndPrincipalsObservation `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
+	AndPrincipals []PrincipalsAndPrincipalsObservation `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
 }
 
-
 type RbacPrincipalsParameters struct {
-
-// +kubebuilder:validation:Optional
-AndPrincipals []PrincipalsAndPrincipalsParameters `json:"andPrincipals" tf:"and_principals,omitempty"`
+	// +kubebuilder:validation:Optional
+	AndPrincipals []PrincipalsAndPrincipalsParameters `json:"andPrincipals" tf:"and_principals,omitempty"`
 }
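Assuming the usual RBAC semantics (entries inside one andPrincipals block must all match, while separate principals entries are alternatives, i.e. an OR of ANDs), a sketch that only admits a private address range; the action value is illustrative:

    rbac := RouteOptionsRbacInitParameters{
        Action: ptr("ALLOW"),
        Principals: []RbacPrincipalsInitParameters{{
            AndPrincipals: []PrincipalsAndPrincipalsInitParameters{{
                RemoteIP: ptr("10.0.0.0/8"),
            }},
        }},
    }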
 
-
 type RedirectActionInitParameters struct {
+	RemoveQuery *bool `json:"removeQuery,omitempty" tf:"remove_query,omitempty"`
+
+	// Replaces hostname.
+	ReplaceHost *string `json:"replaceHost,omitempty" tf:"replace_host,omitempty"`
 
-RemoveQuery *bool `json:"removeQuery,omitempty" tf:"remove_query,omitempty"`
+	// Replace path.
+	ReplacePath *string `json:"replacePath,omitempty" tf:"replace_path,omitempty"`
 
-// Replaces hostname.
-ReplaceHost *string `json:"replaceHost,omitempty" tf:"replace_host,omitempty"`
+	// Replaces port.
+	ReplacePort *float64 `json:"replacePort,omitempty" tf:"replace_port,omitempty"`
 
-// Replace path.
-ReplacePath *string `json:"replacePath,omitempty" tf:"replace_path,omitempty"`
+	// Replace only matched prefix. Example: match:{ prefix_match: "/some" } redirect: { replace_prefix: "/other" } will redirect "/something" to "/otherthing".
+	ReplacePrefix *string `json:"replacePrefix,omitempty" tf:"replace_prefix,omitempty"`
 
-// Replaces port.
-ReplacePort *float64 `json:"replacePort,omitempty" tf:"replace_port,omitempty"`
+	// Replaces scheme. If the original scheme is http or https, will also remove the 80 or 443 port, if present.
+	ReplaceScheme *string `json:"replaceScheme,omitempty" tf:"replace_scheme,omitempty"`
 
-// Replace only matched prefix. Example: match:{ prefix_match: "/some" } redirect: { replace_prefix: "/other" } will redirect "/something" to "/otherthing".
-ReplacePrefix *string `json:"replacePrefix,omitempty" tf:"replace_prefix,omitempty"`
-
-// Replaces scheme. If the original scheme is http or https, will also remove the 80 or 443 port, if present.
-ReplaceScheme *string `json:"replaceScheme,omitempty" tf:"replace_scheme,omitempty"`
-
-// The HTTP status code to use in the redirect response. Supported values are: moved_permanently, found, see_other, temporary_redirect, permanent_redirect.
-ResponseCode *string `json:"responseCode,omitempty" tf:"response_code,omitempty"`
+	// The HTTP status code to use in the redirect response. Supported values are: moved_permanently, found, see_other, temporary_redirect, permanent_redirect.
+	ResponseCode *string `json:"responseCode,omitempty" tf:"response_code,omitempty"`
 }
 
-
 type RedirectActionObservation struct {
+	RemoveQuery *bool `json:"removeQuery,omitempty" tf:"remove_query,omitempty"`
+
+	// Replaces hostname.
+	ReplaceHost *string `json:"replaceHost,omitempty" tf:"replace_host,omitempty"`
 
-RemoveQuery *bool `json:"removeQuery,omitempty" tf:"remove_query,omitempty"`
+	// Replace path.
+	ReplacePath *string `json:"replacePath,omitempty" tf:"replace_path,omitempty"`
 
-// Replaces hostname.
-ReplaceHost *string `json:"replaceHost,omitempty" tf:"replace_host,omitempty"`
+	// Replaces port.
+	ReplacePort *float64 `json:"replacePort,omitempty" tf:"replace_port,omitempty"`
 
-// Replace path.
-ReplacePath *string `json:"replacePath,omitempty" tf:"replace_path,omitempty"`
+	// Replace only matched prefix. Example: match:{ prefix_match: "/some" } redirect: { replace_prefix: "/other" } will redirect "/something" to "/otherthing".
+	ReplacePrefix *string `json:"replacePrefix,omitempty" tf:"replace_prefix,omitempty"`
 
-// Replaces port.
-ReplacePort *float64 `json:"replacePort,omitempty" tf:"replace_port,omitempty"`
+	// Replaces scheme. If the original scheme is http or https, will also remove the 80 or 443 port, if present.
+	ReplaceScheme *string `json:"replaceScheme,omitempty" tf:"replace_scheme,omitempty"`
 
-// Replace only matched prefix. Example: match:{ prefix_match: "/some" } redirect: { replace_prefix: "/other" } will redirect "/something" to "/otherthing".
-ReplacePrefix *string `json:"replacePrefix,omitempty" tf:"replace_prefix,omitempty"`
-
-// Replaces scheme. If the original scheme is http or https, will also remove the 80 or 443 port, if present.
-ReplaceScheme *string `json:"replaceScheme,omitempty" tf:"replace_scheme,omitempty"`
-
-// The HTTP status code to use in the redirect response. Supported values are: moved_permanently, found, see_other, temporary_redirect, permanent_redirect.
-ResponseCode *string `json:"responseCode,omitempty" tf:"response_code,omitempty"`
+	// The HTTP status code to use in the redirect response. Supported values are: moved_permanently, found, see_other, temporary_redirect, permanent_redirect.
+	ResponseCode *string `json:"responseCode,omitempty" tf:"response_code,omitempty"`
 }
 
-
 type RedirectActionParameters struct {
+	// +kubebuilder:validation:Optional
+	RemoveQuery *bool `json:"removeQuery,omitempty" tf:"remove_query,omitempty"`
 
-// +kubebuilder:validation:Optional
-RemoveQuery *bool `json:"removeQuery,omitempty" tf:"remove_query,omitempty"`
+	// Replaces hostname.
+	// +kubebuilder:validation:Optional
+	ReplaceHost *string `json:"replaceHost,omitempty" tf:"replace_host,omitempty"`
 
-// Replaces hostname.
-// +kubebuilder:validation:Optional
-ReplaceHost *string `json:"replaceHost,omitempty" tf:"replace_host,omitempty"`
+	// Replace path.
+	// +kubebuilder:validation:Optional
+	ReplacePath *string `json:"replacePath,omitempty" tf:"replace_path,omitempty"`
 
-// Replace path.
-// +kubebuilder:validation:Optional
-ReplacePath *string `json:"replacePath,omitempty" tf:"replace_path,omitempty"`
+	// Replaces port.
+	// +kubebuilder:validation:Optional
+	ReplacePort *float64 `json:"replacePort,omitempty" tf:"replace_port,omitempty"`
 
-// Replaces port.
-// +kubebuilder:validation:Optional
-ReplacePort *float64 `json:"replacePort,omitempty" tf:"replace_port,omitempty"`
+	// Replace only matched prefix. Example: match:{ prefix_match: "/some" } redirect: { replace_prefix: "/other" } will redirect "/something" to "/otherthing".
+	// +kubebuilder:validation:Optional
+	ReplacePrefix *string `json:"replacePrefix,omitempty" tf:"replace_prefix,omitempty"`
 
-// Replace only matched prefix. Example: match:{ prefix_match: "/some" } redirect: { replace_prefix: "/other" } will redirect "/something" to "/otherthing".
-// +kubebuilder:validation:Optional
-ReplacePrefix *string `json:"replacePrefix,omitempty" tf:"replace_prefix,omitempty"`
+	// Replaces scheme. If the original scheme is http or https, will also remove the 80 or 443 port, if present.
+	// +kubebuilder:validation:Optional
+	ReplaceScheme *string `json:"replaceScheme,omitempty" tf:"replace_scheme,omitempty"`
 
-// Replaces scheme. If the original scheme is http or https, will also remove the 80 or 443 port, if present.
-// +kubebuilder:validation:Optional
-ReplaceScheme *string `json:"replaceScheme,omitempty" tf:"replace_scheme,omitempty"`
-
-// The HTTP status code to use in the redirect response. Supported values are: moved_permanently, found, see_other, temporary_redirect, permanent_redirect.
-// +kubebuilder:validation:Optional
-ResponseCode *string `json:"responseCode,omitempty" tf:"response_code,omitempty"`
+	// The HTTP status code to use in the redirect response. Supported values are: moved_permanently, found, see_other, temporary_redirect, permanent_redirect.
+	// +kubebuilder:validation:Optional
+	ResponseCode *string `json:"responseCode,omitempty" tf:"response_code,omitempty"`
 }
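A sketch mirroring the replace_prefix example from the comments above ("/something" is redirected to "/otherthing" when the route's match is prefix "/some"):

    redirect := RedirectActionInitParameters{
        ReplacePrefix: ptr("/other"),
        ResponseCode:  ptr("moved_permanently"),
    }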
 
-
 type RouteInitParameters struct {
+	// GRPC route resource. The structure is documented below.
+	GRPCRoute []GRPCRouteInitParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"`
 
-// GRPC route resource. The structure is documented below.
-GRPCRoute []GRPCRouteInitParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"`
-
-// HTTP route resource. The structure is documented below.
-HTTPRoute []HTTPRouteInitParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"`
+	// HTTP route resource. The structure is documented below.
+	HTTPRoute []HTTPRouteInitParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// name of the route.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Route options for the virtual host. The structure is documented below.
-RouteOptions []RouteRouteOptionsInitParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
+	// Route options for the virtual host. The structure is documented below.
+	RouteOptions []RouteRouteOptionsInitParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
 }
 
-
 type RouteObservation struct {
+	// GRPC route resource. The structure is documented below.
+	GRPCRoute []GRPCRouteObservation `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"`
 
-// GRPC route resource. The structure is documented below.
-GRPCRoute []GRPCRouteObservation `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"`
-
-// HTTP route resource. The structure is documented below.
-HTTPRoute []HTTPRouteObservation `json:"httpRoute,omitempty" tf:"http_route,omitempty"`
+	// HTTP route resource. The structure is documented below.
+	HTTPRoute []HTTPRouteObservation `json:"httpRoute,omitempty" tf:"http_route,omitempty"`
 
-// name of the route.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// name of the route.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Route options for the virtual host. The structure is documented below.
-RouteOptions []RouteRouteOptionsObservation `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
+	// Route options for the virtual host. The structure is documented below.
+	RouteOptions []RouteRouteOptionsObservation `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
 }
 
-
 type RouteOptionsRbacInitParameters struct {
+	Action *string `json:"action,omitempty" tf:"action,omitempty"`
 
-
-Action *string `json:"action,omitempty" tf:"action,omitempty"`
-
-Principals []RbacPrincipalsInitParameters `json:"principals,omitempty" tf:"principals,omitempty"`
+	Principals []RbacPrincipalsInitParameters `json:"principals,omitempty" tf:"principals,omitempty"`
 }
 
-
 type RouteOptionsRbacObservation struct {
+	Action *string `json:"action,omitempty" tf:"action,omitempty"`
 
-
-Action *string `json:"action,omitempty" tf:"action,omitempty"`
-
-Principals []RbacPrincipalsObservation `json:"principals,omitempty" tf:"principals,omitempty"`
+	Principals []RbacPrincipalsObservation `json:"principals,omitempty" tf:"principals,omitempty"`
 }
 
-
 type RouteOptionsRbacParameters struct {
+	// +kubebuilder:validation:Optional
+	Action *string `json:"action,omitempty" tf:"action,omitempty"`
 
-// +kubebuilder:validation:Optional
-Action *string `json:"action,omitempty" tf:"action,omitempty"`
-
-// +kubebuilder:validation:Optional
-Principals []RbacPrincipalsParameters `json:"principals" tf:"principals,omitempty"`
+	// +kubebuilder:validation:Optional
+	Principals []RbacPrincipalsParameters `json:"principals" tf:"principals,omitempty"`
 }
 
-
 type RouteOptionsRbacPrincipalsInitParameters struct {
-
-
-AndPrincipals []RbacPrincipalsAndPrincipalsInitParameters `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
+	AndPrincipals []RbacPrincipalsAndPrincipalsInitParameters `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
 }
 
-
 type RouteOptionsRbacPrincipalsObservation struct {
-
-
-AndPrincipals []RbacPrincipalsAndPrincipalsObservation `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
+	AndPrincipals []RbacPrincipalsAndPrincipalsObservation `json:"andPrincipals,omitempty" tf:"and_principals,omitempty"`
 }
 
-
 type RouteOptionsRbacPrincipalsParameters struct {
-
-// +kubebuilder:validation:Optional
-AndPrincipals []RbacPrincipalsAndPrincipalsParameters `json:"andPrincipals" tf:"and_principals,omitempty"`
+	// +kubebuilder:validation:Optional
+	AndPrincipals []RbacPrincipalsAndPrincipalsParameters `json:"andPrincipals" tf:"and_principals,omitempty"`
 }
 
-
 type RouteParameters struct {
+	// GRPC route resource. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	GRPCRoute []GRPCRouteParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"`
 
-// GRPC route resource. The structure is documented below.
-// +kubebuilder:validation:Optional
-GRPCRoute []GRPCRouteParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"`
-
-// HTTP route resource. The structure is documented below.
-// +kubebuilder:validation:Optional
-HTTPRoute []HTTPRouteParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"`
+	// HTTP route resource. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	HTTPRoute []HTTPRouteParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"`
 
-// name of the route.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// name of the route.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Route options for the virtual host. The structure is documented below.
-// +kubebuilder:validation:Optional
-RouteOptions []RouteRouteOptionsParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
+	// Route options for the virtual host. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	RouteOptions []RouteRouteOptionsParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
 }
 
-
 type RouteRouteOptionsInitParameters struct {
+	// RBAC configuration.
+	Rbac []RouteOptionsRbacInitParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
 
-// RBAC configuration.
-Rbac []RouteOptionsRbacInitParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
-
-// SWS profile ID.
-SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
+	// SWS profile ID.
+	SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
 }
 
-
 type RouteRouteOptionsObservation struct {
+	// RBAC configuration.
+	Rbac []RouteOptionsRbacObservation `json:"rbac,omitempty" tf:"rbac,omitempty"`
 
-// RBAC configuration.
-Rbac []RouteOptionsRbacObservation `json:"rbac,omitempty" tf:"rbac,omitempty"`
-
-// SWS profile ID.
-SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
+	// SWS profile ID.
+	SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
 }
 
-
 type RouteRouteOptionsParameters struct {
+	// RBAC configuration.
+	// +kubebuilder:validation:Optional
+	Rbac []RouteOptionsRbacParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
 
-// RBAC configuration.
-// +kubebuilder:validation:Optional
-Rbac []RouteOptionsRbacParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
-
-// SWS profile ID.
-// +kubebuilder:validation:Optional
-SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
+	// SWS profile ID.
+	// +kubebuilder:validation:Optional
+	SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
 }
 
-
 type VirtualHostInitParameters struct {
+	// A list of domains (host/authority header) that will be matched to this virtual host. Wildcard hosts are supported in the form of '*.foo.com' or '*-bar.foo.com'. If not specified, all domains will be matched.
+	// +listType=set
+	Authority []*string `json:"authority,omitempty" tf:"authority,omitempty"`
 
-// A list of domains (host/authority header) that will be matched to this virtual host. Wildcard hosts are supported in the form of '*.foo.com' or '*-bar.foo.com'. If not specified, all domains will be matched.
-// +listType=set
-Authority []*string `json:"authority,omitempty" tf:"authority,omitempty"`
-
-// The ID of the HTTP router to which the virtual host belongs.
-// +crossplane:generate:reference:type=HTTPRouter
-HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"`
+	// The ID of the HTTP router to which the virtual host belongs.
+	// +crossplane:generate:reference:type=HTTPRouter
+	HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"`
 
-// Reference to a HTTPRouter to populate httpRouterId.
-// +kubebuilder:validation:Optional
-HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"`
+	// Reference to a HTTPRouter to populate httpRouterId.
+	// +kubebuilder:validation:Optional
+	HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"`
 
-// Selector for a HTTPRouter to populate httpRouterId.
-// +kubebuilder:validation:Optional
-HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"`
+	// Selector for a HTTPRouter to populate httpRouterId.
+	// +kubebuilder:validation:Optional
+	HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"`
 
-// Apply the following modifications to the request headers. The structure is documented below.
-ModifyRequestHeaders []ModifyRequestHeadersInitParameters `json:"modifyRequestHeaders,omitempty" tf:"modify_request_headers,omitempty"`
+	// Apply the following modifications to the request headers. The structure is documented below.
+	ModifyRequestHeaders []ModifyRequestHeadersInitParameters `json:"modifyRequestHeaders,omitempty" tf:"modify_request_headers,omitempty"`
 
-// Apply the following modifications to the response headers. The structure is documented below.
-ModifyResponseHeaders []ModifyResponseHeadersInitParameters `json:"modifyResponseHeaders,omitempty" tf:"modify_response_headers,omitempty"`
+	// Apply the following modifications to the response headers. The structure is documented below.
+	ModifyResponseHeaders []ModifyResponseHeadersInitParameters `json:"modifyResponseHeaders,omitempty" tf:"modify_response_headers,omitempty"`
 
-// Name of the virtual host. Provided by the client when the virtual host is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Name of the virtual host. Provided by the client when the virtual host is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// A Route resource. Routes are matched in-order. Be careful when adding them to the end. For instance, having http '/' match first makes all other routes unused. The structure is documented below.
-Route []RouteInitParameters `json:"route,omitempty" tf:"route,omitempty"`
+	// A Route resource. Routes are matched in-order. Be careful when adding them to the end. For instance, having http '/' match first makes all other routes unused. The structure is documented below.
+	Route []RouteInitParameters `json:"route,omitempty" tf:"route,omitempty"`
 
-// Route options for the virtual host. The structure is documented below.
-RouteOptions []VirtualHostRouteOptionsInitParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
+	// Route options for the virtual host. The structure is documented below.
+	RouteOptions []VirtualHostRouteOptionsInitParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
 }
 
-
 type VirtualHostObservation struct {
+	// A list of domains (host/authority header) that will be matched to this virtual host. Wildcard hosts are supported in the form of '*.foo.com' or '*-bar.foo.com'. If not specified, all domains will be matched.
+	// +listType=set
+	Authority []*string `json:"authority,omitempty" tf:"authority,omitempty"`
 
-// A list of domains (host/authority header) that will be matched to this virtual host. Wildcard hosts are supported in the form of '*.foo.com' or '*-bar.foo.com'. If not specified, all domains will be matched.
-// +listType=set
-Authority []*string `json:"authority,omitempty" tf:"authority,omitempty"`
-
-// The ID of the HTTP router to which the virtual host belongs.
-HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"`
+	// The ID of the HTTP router to which the virtual host belongs.
+	HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"`
 
-// The ID of the virtual host.
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// The ID of the virtual host.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-// Apply the following modifications to the request headers. The structure is documented below.
-ModifyRequestHeaders []ModifyRequestHeadersObservation `json:"modifyRequestHeaders,omitempty" tf:"modify_request_headers,omitempty"`
+	// Apply the following modifications to the request headers. The structure is documented below.
+	ModifyRequestHeaders []ModifyRequestHeadersObservation `json:"modifyRequestHeaders,omitempty" tf:"modify_request_headers,omitempty"`
 
-// Apply the following modifications to the response headers. The structure is documented below.
-ModifyResponseHeaders []ModifyResponseHeadersObservation `json:"modifyResponseHeaders,omitempty" tf:"modify_response_headers,omitempty"`
+	// Apply the following modifications to the response headers. The structure is documented below.
+	ModifyResponseHeaders []ModifyResponseHeadersObservation `json:"modifyResponseHeaders,omitempty" tf:"modify_response_headers,omitempty"`
 
-// Name of the virtual host. Provided by the client when the virtual host is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Name of the virtual host. Provided by the client when the virtual host is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// A Route resource. Routes are matched in-order. Be careful when adding them to the end. For instance, having http '/' match first makes all other routes unused. The structure is documented below.
-Route []RouteObservation `json:"route,omitempty" tf:"route,omitempty"`
+	// A Route resource. Routes are matched in-order. Be careful when adding them to the end. For instance, having http '/' match first makes all other routes unused. The structure is documented below.
+	Route []RouteObservation `json:"route,omitempty" tf:"route,omitempty"`
 
-// Route options for the virtual host. The structure is documented below.
-RouteOptions []VirtualHostRouteOptionsObservation `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
+	// Route options for the virtual host. The structure is documented below.
+	RouteOptions []VirtualHostRouteOptionsObservation `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
 }
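A sketch of the authority list, using the wildcard forms from the comment above (omit Authority entirely to match all domains):

    vh := VirtualHostInitParameters{
        Name:      ptr("example-host"),
        Authority: []*string{ptr("example.com"), ptr("*.example.com")},
    }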
 
-
 type VirtualHostParameters struct {
+	// A list of domains (host/authority header) that will be matched to this virtual host. Wildcard hosts are supported in the form of '*.foo.com' or '*-bar.foo.com'. If not specified, all domains will be matched.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Authority []*string `json:"authority,omitempty" tf:"authority,omitempty"`
 
-// A list of domains (host/authority header) that will be matched to this virtual host. Wildcard hosts are supported in the form of '*.foo.com' or '*-bar.foo.com'. If not specified, all domains will be matched.
-// +kubebuilder:validation:Optional
-// +listType=set
-Authority []*string `json:"authority,omitempty" tf:"authority,omitempty"`
+	// The ID of the HTTP router to which the virtual host belongs.
+	// +crossplane:generate:reference:type=HTTPRouter
+	// +kubebuilder:validation:Optional
+	HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"`
 
-// The ID of the HTTP router to which the virtual host belongs.
-// +crossplane:generate:reference:type=HTTPRouter
-// +kubebuilder:validation:Optional
-HTTPRouterID *string `json:"httpRouterId,omitempty" tf:"http_router_id,omitempty"`
+	// Reference to a HTTPRouter to populate httpRouterId.
+	// +kubebuilder:validation:Optional
+	HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"`
 
-// Reference to a HTTPRouter to populate httpRouterId.
-// +kubebuilder:validation:Optional
-HTTPRouterIDRef *v1.Reference `json:"httpRouterIdRef,omitempty" tf:"-"`
+	// Selector for a HTTPRouter to populate httpRouterId.
+	// +kubebuilder:validation:Optional
+	HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"`
 
-// Selector for a HTTPRouter to populate httpRouterId.
-// +kubebuilder:validation:Optional
-HTTPRouterIDSelector *v1.Selector `json:"httpRouterIdSelector,omitempty" tf:"-"`
+	// Apply the following modifications to the request headers. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ModifyRequestHeaders []ModifyRequestHeadersParameters `json:"modifyRequestHeaders,omitempty" tf:"modify_request_headers,omitempty"`
 
-// Apply the following modifications to the request headers. The structure is documented below.
-// +kubebuilder:validation:Optional
-ModifyRequestHeaders []ModifyRequestHeadersParameters `json:"modifyRequestHeaders,omitempty" tf:"modify_request_headers,omitempty"`
+	// Apply the following modifications to the response headers. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ModifyResponseHeaders []ModifyResponseHeadersParameters `json:"modifyResponseHeaders,omitempty" tf:"modify_response_headers,omitempty"`
 
-// Apply the following modifications to the response headers. The structure is documented below.
-// +kubebuilder:validation:Optional
-ModifyResponseHeaders []ModifyResponseHeadersParameters `json:"modifyResponseHeaders,omitempty" tf:"modify_response_headers,omitempty"`
+	// Name of the virtual host. Provided by the client when the virtual host is created.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Name of the virtual host. Provided by the client when the virtual host is created.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// A Route resource. Routes are matched in-order. Be careful when adding them to the end. For instance, having http '/' match first makes all other routes unused. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Route []RouteParameters `json:"route,omitempty" tf:"route,omitempty"`
 
-// A Route resource. Routes are matched in-order. Be careful when adding them to the end. For instance, having http '/' match first makes all other routes unused. The structure is documented below.
-// +kubebuilder:validation:Optional
-Route []RouteParameters `json:"route,omitempty" tf:"route,omitempty"`
-
-// Route options for the virtual host. The structure is documented below.
-// +kubebuilder:validation:Optional
-RouteOptions []VirtualHostRouteOptionsParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
+	// Route options for the virtual host. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	RouteOptions []VirtualHostRouteOptionsParameters `json:"routeOptions,omitempty" tf:"route_options,omitempty"`
 }
 
-
 type VirtualHostRouteOptionsInitParameters struct {
+	// RBAC configuration.
+	Rbac []VirtualHostRouteOptionsRbacInitParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
 
-// RBAC configuration.
-Rbac []VirtualHostRouteOptionsRbacInitParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
-
-// SWS profile ID.
-SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
+	// SWS profile ID.
+	SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
 }
 
-
 type VirtualHostRouteOptionsObservation struct {
+	// RBAC configuration.
+	Rbac []VirtualHostRouteOptionsRbacObservation `json:"rbac,omitempty" tf:"rbac,omitempty"`
 
-// RBAC configuration.
-Rbac []VirtualHostRouteOptionsRbacObservation `json:"rbac,omitempty" tf:"rbac,omitempty"`
-
-// SWS profile ID.
-SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
+	// SWS profile ID.
+	SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
 }
 
-
 type VirtualHostRouteOptionsParameters struct {
+	// RBAC configuration.
+	// +kubebuilder:validation:Optional
+	Rbac []VirtualHostRouteOptionsRbacParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
 
-// RBAC configuration.
-// +kubebuilder:validation:Optional
-Rbac []VirtualHostRouteOptionsRbacParameters `json:"rbac,omitempty" tf:"rbac,omitempty"`
-
-// SWS profile ID.
-// +kubebuilder:validation:Optional
-SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
+	// SWS profile ID.
+	// +kubebuilder:validation:Optional
+	SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"`
 }
+ // +kubebuilder:validation:Optional + SecurityProfileID *string `json:"securityProfileId,omitempty" tf:"security_profile_id,omitempty"` } - type VirtualHostRouteOptionsRbacInitParameters struct { + Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Principals []RouteOptionsRbacPrincipalsInitParameters `json:"principals,omitempty" tf:"principals,omitempty"` + Principals []RouteOptionsRbacPrincipalsInitParameters `json:"principals,omitempty" tf:"principals,omitempty"` } - type VirtualHostRouteOptionsRbacObservation struct { + Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -Principals []RouteOptionsRbacPrincipalsObservation `json:"principals,omitempty" tf:"principals,omitempty"` + Principals []RouteOptionsRbacPrincipalsObservation `json:"principals,omitempty" tf:"principals,omitempty"` } - type VirtualHostRouteOptionsRbacParameters struct { + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` -// +kubebuilder:validation:Optional -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -// +kubebuilder:validation:Optional -Principals []RouteOptionsRbacPrincipalsParameters `json:"principals" tf:"principals,omitempty"` + // +kubebuilder:validation:Optional + Principals []RouteOptionsRbacPrincipalsParameters `json:"principals" tf:"principals,omitempty"` } // VirtualHostSpec defines the desired state of VirtualHost type VirtualHostSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider VirtualHostParameters `json:"forProvider"` + ForProvider VirtualHostParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -1284,20 +1103,19 @@ type VirtualHostSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider VirtualHostInitParameters `json:"initProvider,omitempty"` + InitProvider VirtualHostInitParameters `json:"initProvider,omitempty"` } // VirtualHostStatus defines the observed state of VirtualHost. type VirtualHostStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider VirtualHostObservation `json:"atProvider,omitempty"` + AtProvider VirtualHostObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // VirtualHost is the Schema for the VirtualHosts API. Virtual hosts combine routes belonging to the same set of domains.
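The XValidation rule carried on the VirtualHost type below makes name effectively required whenever the management policies permit Create or Update: it must be present in either spec.forProvider or spec.initProvider. A minimal sketch of an object that satisfies the rule through initProvider alone (assuming this provider module is importable and that VirtualHostInitParameters carries the same Name field as VirtualHostParameters):

package main

import (
	"fmt"

	albv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1"
)

func main() {
	name := "example-host" // hypothetical host name
	vh := albv1alpha1.VirtualHost{
		Spec: albv1alpha1.VirtualHostSpec{
			// forProvider.name stays unset; the CEL rule is still satisfied
			// because initProvider.name is present, and the controller folds
			// initProvider values into forProvider (see GetMergedParameters).
			InitProvider: albv1alpha1.VirtualHostInitParameters{
				Name: &name,
			},
		},
	}
	fmt.Println(*vh.Spec.InitProvider.Name)
}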
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -1307,9 +1125,9 @@ type VirtualHostStatus struct { type VirtualHost struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec VirtualHostSpec `json:"spec"` - Status VirtualHostStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec VirtualHostSpec `json:"spec"` + Status VirtualHostStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/api/v1alpha1/zz_gateway_terraformed.go b/apis/api/v1alpha1/zz_gateway_terraformed.go index 70f3581..4b9c796 100755 --- a/apis/api/v1alpha1/zz_gateway_terraformed.go +++ b/apis/api/v1alpha1/zz_gateway_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Gateway func (mg *Gateway) GetTerraformResourceType() string { - return "yandex_api_gateway" + return "yandex_api_gateway" } // GetConnectionDetailsMapping for this Gateway func (tr *Gateway) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Gateway func (tr *Gateway) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Gateway func (tr *Gateway) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Gateway func (tr *Gateway) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Gateway func (tr *Gateway) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Gateway func (tr 
*Gateway) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Gateway func (tr *Gateway) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this Gateway func (tr *Gateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Gateway using its observed tfState. // returns true if there are any spec changes for the resource. func (tr *Gateway) LateInitialize(attrs []byte) (bool, error) { - params := &GatewayParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...)
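The Note(lsviben) comment above is the key detail of GetMergedParameters: mergo.WithSliceDeepCopy also flips Overwrite on as a side effect, so the config callback turns it back off and existing forProvider values win any conflict with initProvider. A standalone sketch of that merge behavior, with hypothetical field names and assuming the github.com/imdario/mergo module:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type parameters struct {
	Name    *string
	Subnets []string
}

func main() {
	forName := "from-forProvider"
	dst := parameters{Name: &forName} // stands in for forProvider
	initName := "from-initProvider"
	src := parameters{Name: &initName, Subnets: []string{"a", "b"}} // stands in for initProvider

	// WithSliceDeepCopy deep-copies slice elements but also sets
	// Overwrite=true, so it is reset to false here: fields already
	// present in dst are kept, empty ones are filled from src.
	if err := mergo.Merge(&dst, src, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}
	fmt.Println(*dst.Name, dst.Subnets) // from-forProvider [a b]
}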
- return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &GatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Gateway) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/api/v1alpha1/zz_gateway_types.go b/apis/api/v1alpha1/zz_gateway_types.go index 87588ee..5f4d845 100755 --- a/apis/api/v1alpha1/zz_gateway_types.go +++ b/apis/api/v1alpha1/zz_gateway_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,336 +7,301 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type CanaryInitParameters struct { + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` -// A set of values for variables in gateway specification. -// +mapType=granular -Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` - -// Percentage of requests, which will be processed by canary release. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Percentage of requests, which will be processed by canary release. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type CanaryObservation struct { + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` -// A set of values for variables in gateway specification. -// +mapType=granular -Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` - -// Percentage of requests, which will be processed by canary release. -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Percentage of requests, which will be processed by canary release. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type CanaryParameters struct { + // A set of values for variables in gateway specification. + // +kubebuilder:validation:Optional + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` -// A set of values for variables in gateway specification. -// +kubebuilder:validation:Optional -// +mapType=granular -Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` - -// Percentage of requests, which will be processed by canary release. -// +kubebuilder:validation:Optional -Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` + // Percentage of requests, which will be processed by canary release. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` } - type ConnectivityInitParameters struct { - -// Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. 
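LateInitialize above back-fills unset spec.forProvider fields from the observed Terraform state and reports whether the spec changed. A sketch of the call shape; the attribute keys follow the tf tags in this file, and the folder ID value is made up:

package main

import (
	"fmt"

	apiv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/api/v1alpha1"
)

func main() {
	gw := &apiv1alpha1.Gateway{}
	// Raw tfstate attributes, keyed by the tf:"..." tags of GatewayParameters.
	attrs := []byte(`{"folder_id":"b1gexample","execution_timeout":"300"}`)
	changed, err := gw.LateInitialize(attrs)
	if err != nil {
		panic(err)
	}
	// Both spec fields were nil, so they should be back-filled from the
	// observed state and changed should report true.
	fmt.Println(changed, *gw.Spec.ForProvider.FolderID, *gw.Spec.ForProvider.ExecutionTimeout)
}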
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` } - type ConnectivityObservation struct { - -// Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` } - type ConnectivityParameters struct { - -// Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId" tf:"network_id,omitempty"` + // Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId" tf:"network_id,omitempty"` } - type CustomDomainsInitParameters struct { + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` -CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` - -DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` - -Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` } - type CustomDomainsObservation struct { + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` -CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` - -DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` - -Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` } - type CustomDomainsParameters struct { + // +kubebuilder:validation:Optional + CertificateID *string `json:"certificateId" tf:"certificate_id,omitempty"` -// +kubebuilder:validation:Optional -CertificateID *string `json:"certificateId" tf:"certificate_id,omitempty"` + // +kubebuilder:validation:Optional + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` -// +kubebuilder:validation:Optional -DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` - -// +kubebuilder:validation:Optional -Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` } - type GatewayInitParameters struct { + // Canary release settings of gateway. + Canary []CanaryInitParameters `json:"canary,omitempty" tf:"canary,omitempty"` -// Canary release settings of gateway. -Canary []CanaryInitParameters `json:"canary,omitempty" tf:"canary,omitempty"` - -// Gateway connectivity. If specified the gateway will be attached to specified network. -Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Gateway connectivity. If specified the gateway will be attached to specified network. + Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Set of custom domains to be attached to Yandex API Gateway. -CustomDomains []CustomDomainsInitParameters `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` + // Set of custom domains to be attached to Yandex API Gateway. 
+ CustomDomains []CustomDomainsInitParameters `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` -// Description of the Yandex Cloud API Gateway. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud API Gateway. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Execution timeout in seconds for the Yandex Cloud API Gateway. -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds for the Yandex Cloud API Gateway. + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the Yandex Cloud API Gateway. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud API Gateway. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging from Yandex Cloud API Gateway. -LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud API Gateway. + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Yandex Cloud API Gateway name used to define API Gateway. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud API Gateway name used to define API Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// OpenAPI specification for Yandex API Gateway. -Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + // OpenAPI specification for Yandex API Gateway. + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` -// A set of values for variables in gateway specification. -// +mapType=granular -Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` } - type GatewayObservation struct { + // Canary release settings of gateway. 
+ Canary []CanaryObservation `json:"canary,omitempty" tf:"canary,omitempty"` -// Canary release settings of gateway. -Canary []CanaryObservation `json:"canary,omitempty" tf:"canary,omitempty"` - -// Gateway connectivity. If specified the gateway will be attached to specified network. -Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Gateway connectivity. If specified the gateway will be attached to specified network. + Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Creation timestamp of the Yandex Cloud API Gateway. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // Creation timestamp of the Yandex Cloud API Gateway. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Set of custom domains to be attached to Yandex API Gateway. -CustomDomains []CustomDomainsObservation `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` + // Set of custom domains to be attached to Yandex API Gateway. + CustomDomains []CustomDomainsObservation `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` -// Description of the Yandex Cloud API Gateway. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud API Gateway. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Default domain for the Yandex API Gateway. Generated at creation time. -Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + // Default domain for the Yandex API Gateway. Generated at creation time. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` -// Execution timeout in seconds for the Yandex Cloud API Gateway. -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds for the Yandex Cloud API Gateway. + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Yandex Cloud API Gateway. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud API Gateway. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Options for logging from Yandex Cloud API Gateway. -LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud API Gateway. + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Yandex Cloud API Gateway name used to define API Gateway. 
-Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud API Gateway name used to define API Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// OpenAPI specification for Yandex API Gateway. -Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + // OpenAPI specification for Yandex API Gateway. + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` -// Status of the Yandex API Gateway. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // Status of the Yandex API Gateway. + Status *string `json:"status,omitempty" tf:"status,omitempty"` -// (DEPRECATED, use custom_domains instead) Set of user domains attached to Yandex API Gateway. -// +listType=set -UserDomains []*string `json:"userDomains,omitempty" tf:"user_domains,omitempty"` + // (DEPRECATED, use custom_domains instead) Set of user domains attached to Yandex API Gateway. + // +listType=set + UserDomains []*string `json:"userDomains,omitempty" tf:"user_domains,omitempty"` -// A set of values for variables in gateway specification. -// +mapType=granular -Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` } - type GatewayParameters struct { + // Canary release settings of gateway. + // +kubebuilder:validation:Optional + Canary []CanaryParameters `json:"canary,omitempty" tf:"canary,omitempty"` -// Canary release settings of gateway. -// +kubebuilder:validation:Optional -Canary []CanaryParameters `json:"canary,omitempty" tf:"canary,omitempty"` - -// Gateway connectivity. If specified the gateway will be attached to specified network. -// +kubebuilder:validation:Optional -Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Gateway connectivity. If specified the gateway will be attached to specified network. + // +kubebuilder:validation:Optional + Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Set of custom domains to be attached to Yandex API Gateway. -// +kubebuilder:validation:Optional -CustomDomains []CustomDomainsParameters `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` + // Set of custom domains to be attached to Yandex API Gateway. + // +kubebuilder:validation:Optional + CustomDomains []CustomDomainsParameters `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` -// Description of the Yandex Cloud API Gateway. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Execution timeout in seconds for the Yandex Cloud API Gateway. -// +kubebuilder:validation:Optional -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds for the Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the Yandex Cloud API Gateway. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging from Yandex Cloud API Gateway. -// +kubebuilder:validation:Optional -LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Yandex Cloud API Gateway name used to define API Gateway. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud API Gateway name used to define API Gateway. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// OpenAPI specification for Yandex API Gateway. -// +kubebuilder:validation:Optional -Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + // OpenAPI specification for Yandex API Gateway. + // +kubebuilder:validation:Optional + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` -// A set of values for variables in gateway specification. -// +kubebuilder:validation:Optional -// +mapType=granular -Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + // A set of values for variables in gateway specification. + // +kubebuilder:validation:Optional + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` } - type LogOptionsInitParameters struct { + // Is logging from API Gateway disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from API Gateway disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. 
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsObservation struct { + // Is logging from API Gateway disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from API Gateway disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsParameters struct { + // Is logging from API Gateway disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from API Gateway disabled -// +kubebuilder:validation:Optional -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -// +kubebuilder:validation:Optional -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -// +kubebuilder:validation:Optional -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } // GatewaySpec defines the desired state of Gateway type GatewaySpec struct { v1.ResourceSpec `json:",inline"` - ForProvider GatewayParameters `json:"forProvider"` + ForProvider GatewayParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. 
// InitProvider holds the same fields as ForProvider, with the exception @@ -349,20 +312,19 @@ type GatewaySpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider GatewayInitParameters `json:"initProvider,omitempty"` + InitProvider GatewayInitParameters `json:"initProvider,omitempty"` } // GatewayStatus defines the observed state of Gateway. type GatewayStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider GatewayObservation `json:"atProvider,omitempty"` + AtProvider GatewayObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Gateway is the Schema for the Gateways API. Allows management of a Yandex Cloud API Gateway. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -372,10 +334,10 @@ type GatewayStatus struct { type Gateway struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" - Spec GatewaySpec `json:"spec"` - Status GatewayStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec GatewaySpec `json:"spec"` + Status GatewayStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/api/v1alpha1/zz_generated.conversion_hubs.go b/apis/api/v1alpha1/zz_generated.conversion_hubs.go index 7436418..41b1249 100755 --- a/apis/api/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/api/v1alpha1/zz_generated.conversion_hubs.go @@ -1,10 +1,6 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 - - // Hub marks this type as a conversion hub. - func (tr *Gateway) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *Gateway) Hub() {} diff --git a/apis/api/v1alpha1/zz_generated.deepcopy.go b/apis/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..9bc5460 --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,823 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT.
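Per the two XValidation rules above, a Gateway that can be created or updated must supply both name and spec (the OpenAPI document) via forProvider or initProvider. A minimal sketch of a passing object, with a made-up OpenAPI stub:

package main

import (
	"fmt"

	apiv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/api/v1alpha1"
)

func main() {
	name := "example-gateway" // hypothetical gateway name
	spec := `openapi: 3.0.0
info: {title: example, version: "1.0"}
paths: {}`
	gw := apiv1alpha1.Gateway{
		Spec: apiv1alpha1.GatewaySpec{
			ForProvider: apiv1alpha1.GatewayParameters{
				Name: &name, // required by the first CEL rule
				Spec: &spec, // required by the second CEL rule
			},
		},
	}
	fmt.Println(*gw.Spec.ForProvider.Name, *gw.Spec.ForProvider.Spec != "")
}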
+ +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryInitParameters) DeepCopyInto(out *CanaryInitParameters) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryInitParameters. +func (in *CanaryInitParameters) DeepCopy() *CanaryInitParameters { + if in == nil { + return nil + } + out := new(CanaryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryObservation) DeepCopyInto(out *CanaryObservation) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryObservation. +func (in *CanaryObservation) DeepCopy() *CanaryObservation { + if in == nil { + return nil + } + out := new(CanaryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryParameters) DeepCopyInto(out *CanaryParameters) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryParameters. +func (in *CanaryParameters) DeepCopy() *CanaryParameters { + if in == nil { + return nil + } + out := new(CanaryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInitParameters) DeepCopyInto(out *ConnectivityInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInitParameters. 
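The generated DeepCopyInto bodies above clone map[string]*string values entry by entry, allocating a fresh string for every non-nil pointer; copying only the map header would alias the pointers and let a mutation on the copy reach the original. The same pattern, reduced to a runnable demonstration:

package main

import "fmt"

// deepCopyStringPtrMap mirrors the generated pattern: a new map, a new
// *string per non-nil value, and nil entries preserved as nil.
func deepCopyStringPtrMap(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		v := *val
		out[key] = &v
	}
	return out
}

func main() {
	v := "stable"
	orig := map[string]*string{"env": &v, "unset": nil}
	cp := deepCopyStringPtrMap(orig)
	*cp["env"] = "mutated"
	fmt.Println(*orig["env"]) // still "stable": the copy does not alias
}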
+func (in *ConnectivityInitParameters) DeepCopy() *ConnectivityInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityObservation) DeepCopyInto(out *ConnectivityObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityObservation. +func (in *ConnectivityObservation) DeepCopy() *ConnectivityObservation { + if in == nil { + return nil + } + out := new(ConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityParameters) DeepCopyInto(out *ConnectivityParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityParameters. +func (in *ConnectivityParameters) DeepCopy() *ConnectivityParameters { + if in == nil { + return nil + } + out := new(ConnectivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainsInitParameters) DeepCopyInto(out *CustomDomainsInitParameters) { + *out = *in + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainsInitParameters. +func (in *CustomDomainsInitParameters) DeepCopy() *CustomDomainsInitParameters { + if in == nil { + return nil + } + out := new(CustomDomainsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainsObservation) DeepCopyInto(out *CustomDomainsObservation) { + *out = *in + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainsObservation. +func (in *CustomDomainsObservation) DeepCopy() *CustomDomainsObservation { + if in == nil { + return nil + } + out := new(CustomDomainsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDomainsParameters) DeepCopyInto(out *CustomDomainsParameters) { + *out = *in + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainsParameters. +func (in *CustomDomainsParameters) DeepCopy() *CustomDomainsParameters { + if in == nil { + return nil + } + out := new(CustomDomainsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayInitParameters) DeepCopyInto(out *GatewayInitParameters) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = make([]CanaryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomDomains != nil { + in, out := &in.CustomDomains, &out.CustomDomains + *out = make([]CustomDomainsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = 
**in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInitParameters. +func (in *GatewayInitParameters) DeepCopy() *GatewayInitParameters { + if in == nil { + return nil + } + out := new(GatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayObservation) DeepCopyInto(out *GatewayObservation) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = make([]CanaryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.CustomDomains != nil { + in, out := &in.CustomDomains, &out.CustomDomains + *out = make([]CustomDomainsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.LogOptions != nil { + in, out 
:= &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.UserDomains != nil { + in, out := &in.UserDomains, &out.UserDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayObservation. +func (in *GatewayObservation) DeepCopy() *GatewayObservation { + if in == nil { + return nil + } + out := new(GatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayParameters) DeepCopyInto(out *GatewayParameters) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = make([]CanaryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomDomains != nil { + in, out := &in.CustomDomains, &out.CustomDomains + *out = make([]CustomDomainsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { 
+ var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayParameters. +func (in *GatewayParameters) DeepCopy() *GatewayParameters { + if in == nil { + return nil + } + out := new(GatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. +func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatus. +func (in *GatewayStatus) DeepCopy() *GatewayStatus { + if in == nil { + return nil + } + out := new(GatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. 
+func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/api/v1alpha1/zz_generated.resolvers.go b/apis/api/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..155dd00 --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Gateway. +func (mg *Gateway) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/api/v1alpha1/zz_groupversion_info.go b/apis/api/v1alpha1/zz_groupversion_info.go index 05f35d5..a7b4506 100755 --- a/apis/api/v1alpha1/zz_groupversion_info.go +++ b/apis/api/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
// +kubebuilder:object:generate=true diff --git a/apis/audit/v1alpha1/zz_generated.conversion_hubs.go b/apis/audit/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..42606be --- /dev/null +++ b/apis/audit/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *TrailsTrail) Hub() {} diff --git a/apis/audit/v1alpha1/zz_generated.deepcopy.go b/apis/audit/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..9411c2c --- /dev/null +++ b/apis/audit/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1987 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnyFilterInitParameters) DeepCopyInto(out *AnyFilterInitParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyFilterInitParameters. +func (in *AnyFilterInitParameters) DeepCopy() *AnyFilterInitParameters { + if in == nil { + return nil + } + out := new(AnyFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnyFilterObservation) DeepCopyInto(out *AnyFilterObservation) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyFilterObservation. +func (in *AnyFilterObservation) DeepCopy() *AnyFilterObservation { + if in == nil { + return nil + } + out := new(AnyFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnyFilterParameters) DeepCopyInto(out *AnyFilterParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyFilterParameters. +func (in *AnyFilterParameters) DeepCopy() *AnyFilterParameters { + if in == nil { + return nil + } + out := new(AnyFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnyFiltersInitParameters) DeepCopyInto(out *AnyFiltersInitParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyFiltersInitParameters. +func (in *AnyFiltersInitParameters) DeepCopy() *AnyFiltersInitParameters { + if in == nil { + return nil + } + out := new(AnyFiltersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnyFiltersObservation) DeepCopyInto(out *AnyFiltersObservation) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyFiltersObservation. +func (in *AnyFiltersObservation) DeepCopy() *AnyFiltersObservation { + if in == nil { + return nil + } + out := new(AnyFiltersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnyFiltersParameters) DeepCopyInto(out *AnyFiltersParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyFiltersParameters. +func (in *AnyFiltersParameters) DeepCopy() *AnyFiltersParameters { + if in == nil { + return nil + } + out := new(AnyFiltersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CategoriesInitParameters) DeepCopyInto(out *CategoriesInitParameters) { + *out = *in + if in.Plane != nil { + in, out := &in.Plane, &out.Plane + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CategoriesInitParameters. +func (in *CategoriesInitParameters) DeepCopy() *CategoriesInitParameters { + if in == nil { + return nil + } + out := new(CategoriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CategoriesObservation) DeepCopyInto(out *CategoriesObservation) { + *out = *in + if in.Plane != nil { + in, out := &in.Plane, &out.Plane + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CategoriesObservation. 
+func (in *CategoriesObservation) DeepCopy() *CategoriesObservation { + if in == nil { + return nil + } + out := new(CategoriesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CategoriesParameters) DeepCopyInto(out *CategoriesParameters) { + *out = *in + if in.Plane != nil { + in, out := &in.Plane, &out.Plane + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CategoriesParameters. +func (in *CategoriesParameters) DeepCopy() *CategoriesParameters { + if in == nil { + return nil + } + out := new(CategoriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataEventsFilterInitParameters) DeepCopyInto(out *DataEventsFilterInitParameters) { + *out = *in + if in.ExcludedEvents != nil { + in, out := &in.ExcludedEvents, &out.ExcludedEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludedEvents != nil { + in, out := &in.IncludedEvents, &out.IncludedEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceScope != nil { + in, out := &in.ResourceScope, &out.ResourceScope + *out = make([]ResourceScopeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataEventsFilterInitParameters. +func (in *DataEventsFilterInitParameters) DeepCopy() *DataEventsFilterInitParameters { + if in == nil { + return nil + } + out := new(DataEventsFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataEventsFilterObservation) DeepCopyInto(out *DataEventsFilterObservation) { + *out = *in + if in.ExcludedEvents != nil { + in, out := &in.ExcludedEvents, &out.ExcludedEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludedEvents != nil { + in, out := &in.IncludedEvents, &out.IncludedEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceScope != nil { + in, out := &in.ResourceScope, &out.ResourceScope + *out = make([]ResourceScopeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataEventsFilterObservation. 
+func (in *DataEventsFilterObservation) DeepCopy() *DataEventsFilterObservation { + if in == nil { + return nil + } + out := new(DataEventsFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataEventsFilterParameters) DeepCopyInto(out *DataEventsFilterParameters) { + *out = *in + if in.ExcludedEvents != nil { + in, out := &in.ExcludedEvents, &out.ExcludedEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludedEvents != nil { + in, out := &in.IncludedEvents, &out.IncludedEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceScope != nil { + in, out := &in.ResourceScope, &out.ResourceScope + *out = make([]ResourceScopeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataEventsFilterParameters. +func (in *DataEventsFilterParameters) DeepCopy() *DataEventsFilterParameters { + if in == nil { + return nil + } + out := new(DataEventsFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamDestinationInitParameters) DeepCopyInto(out *DataStreamDestinationInitParameters) { + *out = *in + if in.DatabaseID != nil { + in, out := &in.DatabaseID, &out.DatabaseID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamDestinationInitParameters. +func (in *DataStreamDestinationInitParameters) DeepCopy() *DataStreamDestinationInitParameters { + if in == nil { + return nil + } + out := new(DataStreamDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamDestinationObservation) DeepCopyInto(out *DataStreamDestinationObservation) { + *out = *in + if in.DatabaseID != nil { + in, out := &in.DatabaseID, &out.DatabaseID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamDestinationObservation. +func (in *DataStreamDestinationObservation) DeepCopy() *DataStreamDestinationObservation { + if in == nil { + return nil + } + out := new(DataStreamDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataStreamDestinationParameters) DeepCopyInto(out *DataStreamDestinationParameters) { + *out = *in + if in.DatabaseID != nil { + in, out := &in.DatabaseID, &out.DatabaseID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamDestinationParameters. +func (in *DataStreamDestinationParameters) DeepCopy() *DataStreamDestinationParameters { + if in == nil { + return nil + } + out := new(DataStreamDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventFiltersInitParameters) DeepCopyInto(out *EventFiltersInitParameters) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]CategoriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PathFilter != nil { + in, out := &in.PathFilter, &out.PathFilter + *out = make([]PathFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventFiltersInitParameters. +func (in *EventFiltersInitParameters) DeepCopy() *EventFiltersInitParameters { + if in == nil { + return nil + } + out := new(EventFiltersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventFiltersObservation) DeepCopyInto(out *EventFiltersObservation) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]CategoriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PathFilter != nil { + in, out := &in.PathFilter, &out.PathFilter + *out = make([]PathFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventFiltersObservation. +func (in *EventFiltersObservation) DeepCopy() *EventFiltersObservation { + if in == nil { + return nil + } + out := new(EventFiltersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventFiltersParameters) DeepCopyInto(out *EventFiltersParameters) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]CategoriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PathFilter != nil { + in, out := &in.PathFilter, &out.PathFilter + *out = make([]PathFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventFiltersParameters. 
+func (in *EventFiltersParameters) DeepCopy() *EventFiltersParameters { + if in == nil { + return nil + } + out := new(EventFiltersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.EventFilters != nil { + in, out := &in.EventFilters, &out.EventFilters + *out = make([]EventFiltersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PathFilter != nil { + in, out := &in.PathFilter, &out.PathFilter + *out = make([]FilterPathFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.EventFilters != nil { + in, out := &in.EventFilters, &out.EventFilters + *out = make([]EventFiltersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PathFilter != nil { + in, out := &in.PathFilter, &out.PathFilter + *out = make([]FilterPathFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.EventFilters != nil { + in, out := &in.EventFilters, &out.EventFilters + *out = make([]EventFiltersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PathFilter != nil { + in, out := &in.PathFilter, &out.PathFilter + *out = make([]FilterPathFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterPathFilterInitParameters) DeepCopyInto(out *FilterPathFilterInitParameters) { + *out = *in + if in.AnyFilter != nil { + in, out := &in.AnyFilter, &out.AnyFilter + *out = make([]PathFilterAnyFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SomeFilter != nil { + in, out := &in.SomeFilter, &out.SomeFilter + *out = make([]PathFilterSomeFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterPathFilterInitParameters. +func (in *FilterPathFilterInitParameters) DeepCopy() *FilterPathFilterInitParameters { + if in == nil { + return nil + } + out := new(FilterPathFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterPathFilterObservation) DeepCopyInto(out *FilterPathFilterObservation) { + *out = *in + if in.AnyFilter != nil { + in, out := &in.AnyFilter, &out.AnyFilter + *out = make([]PathFilterAnyFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SomeFilter != nil { + in, out := &in.SomeFilter, &out.SomeFilter + *out = make([]PathFilterSomeFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterPathFilterObservation. +func (in *FilterPathFilterObservation) DeepCopy() *FilterPathFilterObservation { + if in == nil { + return nil + } + out := new(FilterPathFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterPathFilterParameters) DeepCopyInto(out *FilterPathFilterParameters) { + *out = *in + if in.AnyFilter != nil { + in, out := &in.AnyFilter, &out.AnyFilter + *out = make([]PathFilterAnyFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SomeFilter != nil { + in, out := &in.SomeFilter, &out.SomeFilter + *out = make([]PathFilterSomeFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterPathFilterParameters. +func (in *FilterPathFilterParameters) DeepCopy() *FilterPathFilterParameters { + if in == nil { + return nil + } + out := new(FilterPathFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilteringPolicyInitParameters) DeepCopyInto(out *FilteringPolicyInitParameters) { + *out = *in + if in.DataEventsFilter != nil { + in, out := &in.DataEventsFilter, &out.DataEventsFilter + *out = make([]DataEventsFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagementEventsFilter != nil { + in, out := &in.ManagementEventsFilter, &out.ManagementEventsFilter + *out = make([]ManagementEventsFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilteringPolicyInitParameters. 
+func (in *FilteringPolicyInitParameters) DeepCopy() *FilteringPolicyInitParameters { + if in == nil { + return nil + } + out := new(FilteringPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilteringPolicyObservation) DeepCopyInto(out *FilteringPolicyObservation) { + *out = *in + if in.DataEventsFilter != nil { + in, out := &in.DataEventsFilter, &out.DataEventsFilter + *out = make([]DataEventsFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagementEventsFilter != nil { + in, out := &in.ManagementEventsFilter, &out.ManagementEventsFilter + *out = make([]ManagementEventsFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilteringPolicyObservation. +func (in *FilteringPolicyObservation) DeepCopy() *FilteringPolicyObservation { + if in == nil { + return nil + } + out := new(FilteringPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilteringPolicyParameters) DeepCopyInto(out *FilteringPolicyParameters) { + *out = *in + if in.DataEventsFilter != nil { + in, out := &in.DataEventsFilter, &out.DataEventsFilter + *out = make([]DataEventsFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagementEventsFilter != nil { + in, out := &in.ManagementEventsFilter, &out.ManagementEventsFilter + *out = make([]ManagementEventsFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilteringPolicyParameters. +func (in *FilteringPolicyParameters) DeepCopy() *FilteringPolicyParameters { + if in == nil { + return nil + } + out := new(FilteringPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingDestinationInitParameters) DeepCopyInto(out *LoggingDestinationInitParameters) { + *out = *in + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestinationInitParameters. +func (in *LoggingDestinationInitParameters) DeepCopy() *LoggingDestinationInitParameters { + if in == nil { + return nil + } + out := new(LoggingDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingDestinationObservation) DeepCopyInto(out *LoggingDestinationObservation) { + *out = *in + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestinationObservation. 
+func (in *LoggingDestinationObservation) DeepCopy() *LoggingDestinationObservation { + if in == nil { + return nil + } + out := new(LoggingDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingDestinationParameters) DeepCopyInto(out *LoggingDestinationParameters) { + *out = *in + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestinationParameters. +func (in *LoggingDestinationParameters) DeepCopy() *LoggingDestinationParameters { + if in == nil { + return nil + } + out := new(LoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementEventsFilterInitParameters) DeepCopyInto(out *ManagementEventsFilterInitParameters) { + *out = *in + if in.ResourceScope != nil { + in, out := &in.ResourceScope, &out.ResourceScope + *out = make([]ManagementEventsFilterResourceScopeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementEventsFilterInitParameters. +func (in *ManagementEventsFilterInitParameters) DeepCopy() *ManagementEventsFilterInitParameters { + if in == nil { + return nil + } + out := new(ManagementEventsFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementEventsFilterObservation) DeepCopyInto(out *ManagementEventsFilterObservation) { + *out = *in + if in.ResourceScope != nil { + in, out := &in.ResourceScope, &out.ResourceScope + *out = make([]ManagementEventsFilterResourceScopeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementEventsFilterObservation. +func (in *ManagementEventsFilterObservation) DeepCopy() *ManagementEventsFilterObservation { + if in == nil { + return nil + } + out := new(ManagementEventsFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementEventsFilterParameters) DeepCopyInto(out *ManagementEventsFilterParameters) { + *out = *in + if in.ResourceScope != nil { + in, out := &in.ResourceScope, &out.ResourceScope + *out = make([]ManagementEventsFilterResourceScopeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementEventsFilterParameters. +func (in *ManagementEventsFilterParameters) DeepCopy() *ManagementEventsFilterParameters { + if in == nil { + return nil + } + out := new(ManagementEventsFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementEventsFilterResourceScopeInitParameters) DeepCopyInto(out *ManagementEventsFilterResourceScopeInitParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementEventsFilterResourceScopeInitParameters. +func (in *ManagementEventsFilterResourceScopeInitParameters) DeepCopy() *ManagementEventsFilterResourceScopeInitParameters { + if in == nil { + return nil + } + out := new(ManagementEventsFilterResourceScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementEventsFilterResourceScopeObservation) DeepCopyInto(out *ManagementEventsFilterResourceScopeObservation) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementEventsFilterResourceScopeObservation. +func (in *ManagementEventsFilterResourceScopeObservation) DeepCopy() *ManagementEventsFilterResourceScopeObservation { + if in == nil { + return nil + } + out := new(ManagementEventsFilterResourceScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementEventsFilterResourceScopeParameters) DeepCopyInto(out *ManagementEventsFilterResourceScopeParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementEventsFilterResourceScopeParameters. +func (in *ManagementEventsFilterResourceScopeParameters) DeepCopy() *ManagementEventsFilterResourceScopeParameters { + if in == nil { + return nil + } + out := new(ManagementEventsFilterResourceScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterAnyFilterInitParameters) DeepCopyInto(out *PathFilterAnyFilterInitParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterAnyFilterInitParameters. +func (in *PathFilterAnyFilterInitParameters) DeepCopy() *PathFilterAnyFilterInitParameters { + if in == nil { + return nil + } + out := new(PathFilterAnyFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PathFilterAnyFilterObservation) DeepCopyInto(out *PathFilterAnyFilterObservation) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterAnyFilterObservation. +func (in *PathFilterAnyFilterObservation) DeepCopy() *PathFilterAnyFilterObservation { + if in == nil { + return nil + } + out := new(PathFilterAnyFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterAnyFilterParameters) DeepCopyInto(out *PathFilterAnyFilterParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterAnyFilterParameters. +func (in *PathFilterAnyFilterParameters) DeepCopy() *PathFilterAnyFilterParameters { + if in == nil { + return nil + } + out := new(PathFilterAnyFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterInitParameters) DeepCopyInto(out *PathFilterInitParameters) { + *out = *in + if in.AnyFilter != nil { + in, out := &in.AnyFilter, &out.AnyFilter + *out = make([]AnyFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SomeFilter != nil { + in, out := &in.SomeFilter, &out.SomeFilter + *out = make([]SomeFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterInitParameters. +func (in *PathFilterInitParameters) DeepCopy() *PathFilterInitParameters { + if in == nil { + return nil + } + out := new(PathFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterObservation) DeepCopyInto(out *PathFilterObservation) { + *out = *in + if in.AnyFilter != nil { + in, out := &in.AnyFilter, &out.AnyFilter + *out = make([]AnyFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SomeFilter != nil { + in, out := &in.SomeFilter, &out.SomeFilter + *out = make([]SomeFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterObservation. +func (in *PathFilterObservation) DeepCopy() *PathFilterObservation { + if in == nil { + return nil + } + out := new(PathFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PathFilterParameters) DeepCopyInto(out *PathFilterParameters) { + *out = *in + if in.AnyFilter != nil { + in, out := &in.AnyFilter, &out.AnyFilter + *out = make([]AnyFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SomeFilter != nil { + in, out := &in.SomeFilter, &out.SomeFilter + *out = make([]SomeFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterParameters. +func (in *PathFilterParameters) DeepCopy() *PathFilterParameters { + if in == nil { + return nil + } + out := new(PathFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterSomeFilterInitParameters) DeepCopyInto(out *PathFilterSomeFilterInitParameters) { + *out = *in + if in.AnyFilters != nil { + in, out := &in.AnyFilters, &out.AnyFilters + *out = make([]SomeFilterAnyFiltersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterSomeFilterInitParameters. +func (in *PathFilterSomeFilterInitParameters) DeepCopy() *PathFilterSomeFilterInitParameters { + if in == nil { + return nil + } + out := new(PathFilterSomeFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterSomeFilterObservation) DeepCopyInto(out *PathFilterSomeFilterObservation) { + *out = *in + if in.AnyFilters != nil { + in, out := &in.AnyFilters, &out.AnyFilters + *out = make([]SomeFilterAnyFiltersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterSomeFilterObservation. +func (in *PathFilterSomeFilterObservation) DeepCopy() *PathFilterSomeFilterObservation { + if in == nil { + return nil + } + out := new(PathFilterSomeFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathFilterSomeFilterParameters) DeepCopyInto(out *PathFilterSomeFilterParameters) { + *out = *in + if in.AnyFilters != nil { + in, out := &in.AnyFilters, &out.AnyFilters + *out = make([]SomeFilterAnyFiltersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathFilterSomeFilterParameters. 
+func (in *PathFilterSomeFilterParameters) DeepCopy() *PathFilterSomeFilterParameters { + if in == nil { + return nil + } + out := new(PathFilterSomeFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceScopeInitParameters) DeepCopyInto(out *ResourceScopeInitParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceScopeInitParameters. +func (in *ResourceScopeInitParameters) DeepCopy() *ResourceScopeInitParameters { + if in == nil { + return nil + } + out := new(ResourceScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceScopeObservation) DeepCopyInto(out *ResourceScopeObservation) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceScopeObservation. +func (in *ResourceScopeObservation) DeepCopy() *ResourceScopeObservation { + if in == nil { + return nil + } + out := new(ResourceScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceScopeParameters) DeepCopyInto(out *ResourceScopeParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceScopeParameters. +func (in *ResourceScopeParameters) DeepCopy() *ResourceScopeParameters { + if in == nil { + return nil + } + out := new(ResourceScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SomeFilterAnyFiltersInitParameters) DeepCopyInto(out *SomeFilterAnyFiltersInitParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SomeFilterAnyFiltersInitParameters. +func (in *SomeFilterAnyFiltersInitParameters) DeepCopy() *SomeFilterAnyFiltersInitParameters { + if in == nil { + return nil + } + out := new(SomeFilterAnyFiltersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SomeFilterAnyFiltersObservation) DeepCopyInto(out *SomeFilterAnyFiltersObservation) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SomeFilterAnyFiltersObservation. +func (in *SomeFilterAnyFiltersObservation) DeepCopy() *SomeFilterAnyFiltersObservation { + if in == nil { + return nil + } + out := new(SomeFilterAnyFiltersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SomeFilterAnyFiltersParameters) DeepCopyInto(out *SomeFilterAnyFiltersParameters) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SomeFilterAnyFiltersParameters. +func (in *SomeFilterAnyFiltersParameters) DeepCopy() *SomeFilterAnyFiltersParameters { + if in == nil { + return nil + } + out := new(SomeFilterAnyFiltersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SomeFilterInitParameters) DeepCopyInto(out *SomeFilterInitParameters) { + *out = *in + if in.AnyFilters != nil { + in, out := &in.AnyFilters, &out.AnyFilters + *out = make([]AnyFiltersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SomeFilterInitParameters. +func (in *SomeFilterInitParameters) DeepCopy() *SomeFilterInitParameters { + if in == nil { + return nil + } + out := new(SomeFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SomeFilterObservation) DeepCopyInto(out *SomeFilterObservation) { + *out = *in + if in.AnyFilters != nil { + in, out := &in.AnyFilters, &out.AnyFilters + *out = make([]AnyFiltersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SomeFilterObservation. +func (in *SomeFilterObservation) DeepCopy() *SomeFilterObservation { + if in == nil { + return nil + } + out := new(SomeFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SomeFilterParameters) DeepCopyInto(out *SomeFilterParameters) {
+	*out = *in
+	if in.AnyFilters != nil {
+		in, out := &in.AnyFilters, &out.AnyFilters
+		*out = make([]AnyFiltersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourceID != nil {
+		in, out := &in.ResourceID, &out.ResourceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourceType != nil {
+		in, out := &in.ResourceType, &out.ResourceType
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SomeFilterParameters.
+func (in *SomeFilterParameters) DeepCopy() *SomeFilterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SomeFilterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageDestinationInitParameters) DeepCopyInto(out *StorageDestinationInitParameters) {
+	*out = *in
+	if in.BucketName != nil {
+		in, out := &in.BucketName, &out.BucketName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectPrefix != nil {
+		in, out := &in.ObjectPrefix, &out.ObjectPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDestinationInitParameters.
+func (in *StorageDestinationInitParameters) DeepCopy() *StorageDestinationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageDestinationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageDestinationObservation) DeepCopyInto(out *StorageDestinationObservation) {
+	*out = *in
+	if in.BucketName != nil {
+		in, out := &in.BucketName, &out.BucketName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectPrefix != nil {
+		in, out := &in.ObjectPrefix, &out.ObjectPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDestinationObservation.
+func (in *StorageDestinationObservation) DeepCopy() *StorageDestinationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageDestinationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageDestinationParameters) DeepCopyInto(out *StorageDestinationParameters) {
+	*out = *in
+	if in.BucketName != nil {
+		in, out := &in.BucketName, &out.BucketName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectPrefix != nil {
+		in, out := &in.ObjectPrefix, &out.ObjectPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDestinationParameters.
+func (in *StorageDestinationParameters) DeepCopy() *StorageDestinationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageDestinationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrail) DeepCopyInto(out *TrailsTrail) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrail.
+func (in *TrailsTrail) DeepCopy() *TrailsTrail {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrail)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TrailsTrail) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrailInitParameters) DeepCopyInto(out *TrailsTrailInitParameters) {
+	*out = *in
+	if in.DataStreamDestination != nil {
+		in, out := &in.DataStreamDestination, &out.DataStreamDestination
+		*out = make([]DataStreamDestinationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = make([]FilterInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilteringPolicy != nil {
+		in, out := &in.FilteringPolicy, &out.FilteringPolicy
+		*out = make([]FilteringPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LoggingDestination != nil {
+		in, out := &in.LoggingDestination, &out.LoggingDestination
+		*out = make([]LoggingDestinationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.StorageDestination != nil {
+		in, out := &in.StorageDestination, &out.StorageDestination
+		*out = make([]StorageDestinationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrailInitParameters.
+func (in *TrailsTrailInitParameters) DeepCopy() *TrailsTrailInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrailInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
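The `Labels` field is a `map[string]*string` (marked `+mapType=granular` in the types file), so the generated copy allocates both a new map header and a new string per non-nil value; neither level is shared with the source. A standalone sketch of the same handling, using a hypothetical helper rather than provider code:

package main

import "fmt"

// deepCopyLabels mirrors the generated treatment of map[string]*string
// fields: fresh map, fresh *string per non-nil entry, nils preserved.
func deepCopyLabels(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		v := *val // copy the value, then store the address of the copy
		out[key] = &v
	}
	return out
}

func main() {
	env := "prod"
	in := map[string]*string{"env": &env, "tier": nil}
	out := deepCopyLabels(in)
	*in["env"] = "dev"
	fmt.Println(*out["env"]) // still "prod": values are not aliased
}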
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrailList) DeepCopyInto(out *TrailsTrailList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]TrailsTrail, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrailList.
+func (in *TrailsTrailList) DeepCopy() *TrailsTrailList {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrailList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TrailsTrailList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrailObservation) DeepCopyInto(out *TrailsTrailObservation) {
+	*out = *in
+	if in.DataStreamDestination != nil {
+		in, out := &in.DataStreamDestination, &out.DataStreamDestination
+		*out = make([]DataStreamDestinationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = make([]FilterObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilteringPolicy != nil {
+		in, out := &in.FilteringPolicy, &out.FilteringPolicy
+		*out = make([]FilteringPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LoggingDestination != nil {
+		in, out := &in.LoggingDestination, &out.LoggingDestination
+		*out = make([]LoggingDestinationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.StorageDestination != nil {
+		in, out := &in.StorageDestination, &out.StorageDestination
+		*out = make([]StorageDestinationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TrailID != nil {
+		in, out := &in.TrailID, &out.TrailID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrailObservation.
+func (in *TrailsTrailObservation) DeepCopy() *TrailsTrailObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrailObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrailParameters) DeepCopyInto(out *TrailsTrailParameters) {
+	*out = *in
+	if in.DataStreamDestination != nil {
+		in, out := &in.DataStreamDestination, &out.DataStreamDestination
+		*out = make([]DataStreamDestinationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = make([]FilterParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilteringPolicy != nil {
+		in, out := &in.FilteringPolicy, &out.FilteringPolicy
+		*out = make([]FilteringPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LoggingDestination != nil {
+		in, out := &in.LoggingDestination, &out.LoggingDestination
+		*out = make([]LoggingDestinationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.StorageDestination != nil {
+		in, out := &in.StorageDestination, &out.StorageDestination
+		*out = make([]StorageDestinationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrailParameters.
+func (in *TrailsTrailParameters) DeepCopy() *TrailsTrailParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrailParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrailSpec) DeepCopyInto(out *TrailsTrailSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrailSpec.
+func (in *TrailsTrailSpec) DeepCopy() *TrailsTrailSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrailSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrailsTrailStatus) DeepCopyInto(out *TrailsTrailStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrailsTrailStatus.
+func (in *TrailsTrailStatus) DeepCopy() *TrailsTrailStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TrailsTrailStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/apis/audit/v1alpha1/zz_generated.resolvers.go b/apis/audit/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..a6d42cd
--- /dev/null
+++ b/apis/audit/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,53 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	errors "github.com/pkg/errors"
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this TrailsTrail.
+func (mg *TrailsTrail) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	return nil
+}
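This angryjet-generated resolver is what turns a `folderIdRef` or `folderIdSelector` into a concrete `folderId`: it looks up the referenced resourcemanager Folder, extracts its external name, and writes the value back into both `forProvider` and `initProvider`. A hedged sketch of driving it by hand, in the same package as the generated code; the function name is illustrative and `c` is assumed to be a configured controller-runtime `client.Reader` backed by a cluster that holds the referenced Folder:

// resolveFolderExample is a sketch, not provider code. In practice the
// managed-resource reconciler calls ResolveReferences on every reconcile.
// Assumed imports: "context", errors "github.com/pkg/errors",
// client "sigs.k8s.io/controller-runtime/pkg/client".
func resolveFolderExample(ctx context.Context, c client.Reader, trail *TrailsTrail) (string, error) {
	if err := trail.ResolveReferences(ctx, c); err != nil {
		// Typical failure: the referenced Folder does not exist yet.
		return "", err
	}
	if trail.Spec.ForProvider.FolderID == nil {
		return "", errors.New("folderId still unset after resolution")
	}
	// The resolved external name now lives in the spec, and FolderIDRef
	// records which Folder object it came from.
	return *trail.Spec.ForProvider.FolderID, nil
}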
diff --git a/apis/audit/v1alpha1/zz_groupversion_info.go b/apis/audit/v1alpha1/zz_groupversion_info.go
new file mode 100755
index 0000000..032a2f4
--- /dev/null
+++ b/apis/audit/v1alpha1/zz_groupversion_info.go
@@ -0,0 +1,28 @@
+// Code generated by upjet. DO NOT EDIT.
+
+// +kubebuilder:object:generate=true
+// +groupName=audit.yandex-cloud.upjet.crossplane.io
+// +versionName=v1alpha1
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata.
+const (
+	CRDGroup   = "audit.yandex-cloud.upjet.crossplane.io"
+	CRDVersion = "v1alpha1"
+)
+
+var (
+	// CRDGroupVersion is the API Group Version used to register the objects
+	CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/audit/v1alpha1/zz_trailstrail_terraformed.go b/apis/audit/v1alpha1/zz_trailstrail_terraformed.go
new file mode 100755
index 0000000..79cd7f7
--- /dev/null
+++ b/apis/audit/v1alpha1/zz_trailstrail_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this TrailsTrail
+func (mg *TrailsTrail) GetTerraformResourceType() string {
+	return "yandex_audit_trails_trail"
+}
+
+// GetConnectionDetailsMapping for this TrailsTrail
+func (tr *TrailsTrail) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this TrailsTrail
+func (tr *TrailsTrail) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this TrailsTrail
+func (tr *TrailsTrail) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this TrailsTrail
+func (tr *TrailsTrail) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this TrailsTrail
+func (tr *TrailsTrail) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this TrailsTrail
+func (tr *TrailsTrail) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this TrailsTrail
+func (tr *TrailsTrail) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this TrailsTrail
+func (tr *TrailsTrail) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
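GetMergedParameters layers `initProvider` underneath `forProvider`: mergo deep-copies slices, but with `Overwrite` switched back off it only fills keys that `forProvider` left empty, so explicit desired state always wins. A runnable standalone sketch of the same merge semantics with plain maps (the values are made up; only the option set matches the generated code):

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	params := map[string]any{"name": "trail-a"}                                      // from forProvider
	initParams := map[string]any{"name": "ignored", "description": "from init"}      // from initProvider

	// Same options as GetMergedParameters: WithSliceDeepCopy implies
	// overwrite, so the custom option turns overwrite back off.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["name"])        // "trail-a": forProvider value kept
	fmt.Println(params["description"]) // "from init": gap filled from initProvider
}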
+
+// LateInitialize this TrailsTrail using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *TrailsTrail) LateInitialize(attrs []byte) (bool, error) {
+	params := &TrailsTrailParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *TrailsTrail) GetTerraformSchemaVersion() int {
+	return 1
+}
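LateInitialize back-fills unset `spec.forProvider` fields from the observed Terraform state and reports whether anything changed; fields the user already set are left alone. A hedged in-package sketch (the function name and the state fragment are illustrative; real `attrs` come from the Terraform lifecycle, with keys following the `tf` struct tags in the types file below):

// lateInitExample is a sketch, same package as the generated code.
func lateInitExample(tr *TrailsTrail) (bool, error) {
	attrs := []byte(`{"folder_id":"b1g-example","name":"trail-a"}`) // placeholder state
	changed, err := tr.LateInitialize(attrs)
	// changed == true means e.g. FolderID was nil in spec.forProvider
	// and has now been filled in from the observed state.
	return changed, err
}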
diff --git a/apis/audit/v1alpha1/zz_trailstrail_types.go b/apis/audit/v1alpha1/zz_trailstrail_types.go
new file mode 100755
index 0000000..c2e2853
--- /dev/null
+++ b/apis/audit/v1alpha1/zz_trailstrail_types.go
@@ -0,0 +1,792 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type AnyFilterInitParameters struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type AnyFilterObservation struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type AnyFilterParameters struct {
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type AnyFiltersInitParameters struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type AnyFiltersObservation struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type AnyFiltersParameters struct {
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type CategoriesInitParameters struct {
+
+	// Type of the event by its relation to the cloud resource model. Possible values: CONTROL_PLANE/DATA_PLANE.
+	Plane *string `json:"plane,omitempty" tf:"plane,omitempty"`
+
+	// Type of the event by its operation effect on the resource. Possible values: READ/WRITE.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type CategoriesObservation struct {
+
+	// Type of the event by its relation to the cloud resource model. Possible values: CONTROL_PLANE/DATA_PLANE.
+	Plane *string `json:"plane,omitempty" tf:"plane,omitempty"`
+
+	// Type of the event by its operation effect on the resource. Possible values: READ/WRITE.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type CategoriesParameters struct {
+
+	// Type of the event by its relation to the cloud resource model. Possible values: CONTROL_PLANE/DATA_PLANE.
+	// +kubebuilder:validation:Optional
+	Plane *string `json:"plane" tf:"plane,omitempty"`
+
+	// Type of the event by its operation effect on the resource. Possible values: READ/WRITE.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+}
+
+type DataEventsFilterInitParameters struct {
+
+	// A list of events that won't be gathered by the trail from this service. New events will be automatically gathered when this option is specified. Mutually exclusive with included_events.
+	ExcludedEvents []*string `json:"excludedEvents,omitempty" tf:"excluded_events,omitempty"`
+
+	// A list of events that will be gathered by the trail from this service. New events won't be gathered by default when this option is specified. Mutually exclusive with excluded_events.
+	IncludedEvents []*string `json:"includedEvents,omitempty" tf:"included_events,omitempty"`
+
+	// Structure describing that events will be gathered from the specified resource.
+	ResourceScope []ResourceScopeInitParameters `json:"resourceScope,omitempty" tf:"resource_scope,omitempty"`
+
+	// ID of the service which events will be gathered.
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+}
+
+type DataEventsFilterObservation struct {
+
+	// A list of events that won't be gathered by the trail from this service. New events will be automatically gathered when this option is specified. Mutually exclusive with included_events.
+	ExcludedEvents []*string `json:"excludedEvents,omitempty" tf:"excluded_events,omitempty"`
+
+	// A list of events that will be gathered by the trail from this service. New events won't be gathered by default when this option is specified. Mutually exclusive with excluded_events.
+	IncludedEvents []*string `json:"includedEvents,omitempty" tf:"included_events,omitempty"`
+
+	// Structure describing that events will be gathered from the specified resource.
+	ResourceScope []ResourceScopeObservation `json:"resourceScope,omitempty" tf:"resource_scope,omitempty"`
+
+	// ID of the service which events will be gathered.
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+}
+
+type DataEventsFilterParameters struct {
+
+	// A list of events that won't be gathered by the trail from this service. New events will be automatically gathered when this option is specified. Mutually exclusive with included_events.
+	// +kubebuilder:validation:Optional
+	ExcludedEvents []*string `json:"excludedEvents,omitempty" tf:"excluded_events,omitempty"`
+
+	// A list of events that will be gathered by the trail from this service. New events won't be gathered by default when this option is specified. Mutually exclusive with excluded_events.
+	// +kubebuilder:validation:Optional
+	IncludedEvents []*string `json:"includedEvents,omitempty" tf:"included_events,omitempty"`
+
+	// Structure describing that events will be gathered from the specified resource.
+	// +kubebuilder:validation:Optional
+	ResourceScope []ResourceScopeParameters `json:"resourceScope" tf:"resource_scope,omitempty"`
+
+	// ID of the service which events will be gathered.
+	// +kubebuilder:validation:Optional
+	Service *string `json:"service" tf:"service,omitempty"`
+}
+
+type DataStreamDestinationInitParameters struct {
+
+	// ID of the YDB hosting the destination data stream.
+	DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"`
+
+	// Name of the YDS stream belonging to the specified YDB.
+	StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"`
+}
+
+type DataStreamDestinationObservation struct {
+
+	// ID of the YDB hosting the destination data stream.
+	DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"`
+
+	// Name of the YDS stream belonging to the specified YDB.
+	StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"`
+}
+
+type DataStreamDestinationParameters struct {
+
+	// ID of the YDB hosting the destination data stream.
+	// +kubebuilder:validation:Optional
+	DatabaseID *string `json:"databaseId" tf:"database_id,omitempty"`
+
+	// Name of the YDS stream belonging to the specified YDB.
+	// +kubebuilder:validation:Optional
+	StreamName *string `json:"streamName" tf:"stream_name,omitempty"`
+}
+
+type EventFiltersInitParameters struct {
+
+	// blocks. With the introduction of included_events/excluded_events you can configure filtering per each event type.
+	Categories []CategoriesInitParameters `json:"categories,omitempty" tf:"categories,omitempty"`
+
+	// with the appropriate resource_scope blocks. You have to account that resource_scope does not support specifying relations between resources, so your configuration will simplify to only the actual resources, that will be monitored.
+	PathFilter []PathFilterInitParameters `json:"pathFilter,omitempty" tf:"path_filter,omitempty"`
+
+	// ID of the service which events will be gathered.
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+}
+
+type EventFiltersObservation struct {
+
+	// blocks. With the introduction of included_events/excluded_events you can configure filtering per each event type.
+	Categories []CategoriesObservation `json:"categories,omitempty" tf:"categories,omitempty"`
+
+	// with the appropriate resource_scope blocks. You have to account that resource_scope does not support specifying relations between resources, so your configuration will simplify to only the actual resources, that will be monitored.
+	PathFilter []PathFilterObservation `json:"pathFilter,omitempty" tf:"path_filter,omitempty"`
+
+	// ID of the service which events will be gathered.
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+}
+
+type EventFiltersParameters struct {
+
+	// blocks. With the introduction of included_events/excluded_events you can configure filtering per each event type.
+	// +kubebuilder:validation:Optional
+	Categories []CategoriesParameters `json:"categories" tf:"categories,omitempty"`
+
+	// with the appropriate resource_scope blocks. You have to account that resource_scope does not support specifying relations between resources, so your configuration will simplify to only the actual resources, that will be monitored.
+	// +kubebuilder:validation:Optional
+	PathFilter []PathFilterParameters `json:"pathFilter" tf:"path_filter,omitempty"`
+
+	// ID of the service which events will be gathered.
+	// +kubebuilder:validation:Optional
+	Service *string `json:"service" tf:"service,omitempty"`
+}
+
+type FilterInitParameters struct {
+
+	// Structure describing filtering process for the service-specific data plane events.
+	EventFilters []EventFiltersInitParameters `json:"eventFilters,omitempty" tf:"event_filters,omitempty"`
+
+	// block with the filtering_policy.management_events_filter. New API states management events filtration in a more clear way. The resources, that were specified, must migrate into the filtering_policy.management_events_filter.resource_scope
+	PathFilter []FilterPathFilterInitParameters `json:"pathFilter,omitempty" tf:"path_filter,omitempty"`
+}
+
+type FilterObservation struct {
+
+	// Structure describing filtering process for the service-specific data plane events.
+	EventFilters []EventFiltersObservation `json:"eventFilters,omitempty" tf:"event_filters,omitempty"`
+
+	// block with the filtering_policy.management_events_filter. New API states management events filtration in a more clear way. The resources, that were specified, must migrate into the filtering_policy.management_events_filter.resource_scope
+	PathFilter []FilterPathFilterObservation `json:"pathFilter,omitempty" tf:"path_filter,omitempty"`
+}
+
+type FilterParameters struct {
+
+	// Structure describing filtering process for the service-specific data plane events.
+	// +kubebuilder:validation:Optional
+	EventFilters []EventFiltersParameters `json:"eventFilters,omitempty" tf:"event_filters,omitempty"`
+
+	// block with the filtering_policy.management_events_filter. New API states management events filtration in a more clear way. The resources, that were specified, must migrate into the filtering_policy.management_events_filter.resource_scope
+	// +kubebuilder:validation:Optional
+	PathFilter []FilterPathFilterParameters `json:"pathFilter,omitempty" tf:"path_filter,omitempty"`
+}
+
+type FilterPathFilterInitParameters struct {
+
+	// Structure describing that events will be gathered from all cloud resources that belong to the parent resource. Mutually exclusive with some_filter.
+	AnyFilter []PathFilterAnyFilterInitParameters `json:"anyFilter,omitempty" tf:"any_filter,omitempty"`
+
+	// Structure describing that events will be gathered from some of the cloud resources that belong to the parent resource. Mutually exclusive with any_filter.
+	SomeFilter []PathFilterSomeFilterInitParameters `json:"someFilter,omitempty" tf:"some_filter,omitempty"`
+}
+
+type FilterPathFilterObservation struct {
+
+	// Structure describing that events will be gathered from all cloud resources that belong to the parent resource. Mutually exclusive with some_filter.
+	AnyFilter []PathFilterAnyFilterObservation `json:"anyFilter,omitempty" tf:"any_filter,omitempty"`
+
+	// Structure describing that events will be gathered from some of the cloud resources that belong to the parent resource. Mutually exclusive with any_filter.
+	SomeFilter []PathFilterSomeFilterObservation `json:"someFilter,omitempty" tf:"some_filter,omitempty"`
+}
+
+type FilterPathFilterParameters struct {
+
+	// Structure describing that events will be gathered from all cloud resources that belong to the parent resource. Mutually exclusive with some_filter.
+	// +kubebuilder:validation:Optional
+	AnyFilter []PathFilterAnyFilterParameters `json:"anyFilter,omitempty" tf:"any_filter,omitempty"`
+
+	// Structure describing that events will be gathered from some of the cloud resources that belong to the parent resource. Mutually exclusive with any_filter.
+	// +kubebuilder:validation:Optional
+	SomeFilter []PathFilterSomeFilterParameters `json:"someFilter,omitempty" tf:"some_filter,omitempty"`
+}
+
+type FilteringPolicyInitParameters struct {
+
+	// Structure describing filtering process for the service-specific data events.
+	DataEventsFilter []DataEventsFilterInitParameters `json:"dataEventsFilter,omitempty" tf:"data_events_filter,omitempty"`
+
+	// Structure describing filtering process for management events.
+	ManagementEventsFilter []ManagementEventsFilterInitParameters `json:"managementEventsFilter,omitempty" tf:"management_events_filter,omitempty"`
+}
+
+type FilteringPolicyObservation struct {
+
+	// Structure describing filtering process for the service-specific data events.
+	DataEventsFilter []DataEventsFilterObservation `json:"dataEventsFilter,omitempty" tf:"data_events_filter,omitempty"`
+
+	// Structure describing filtering process for management events.
+	ManagementEventsFilter []ManagementEventsFilterObservation `json:"managementEventsFilter,omitempty" tf:"management_events_filter,omitempty"`
+}
+
+type FilteringPolicyParameters struct {
+
+	// Structure describing filtering process for the service-specific data events.
+	// +kubebuilder:validation:Optional
+	DataEventsFilter []DataEventsFilterParameters `json:"dataEventsFilter,omitempty" tf:"data_events_filter,omitempty"`
+
+	// Structure describing filtering process for management events.
+	// +kubebuilder:validation:Optional
+	ManagementEventsFilter []ManagementEventsFilterParameters `json:"managementEventsFilter,omitempty" tf:"management_events_filter,omitempty"`
+}
+
+type LoggingDestinationInitParameters struct {
+
+	// ID of the destination Cloud Logging Group.
+	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+}
+
+type LoggingDestinationObservation struct {
+
+	// ID of the destination Cloud Logging Group.
+	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+}
+
+type LoggingDestinationParameters struct {
+
+	// ID of the destination Cloud Logging Group.
+	// +kubebuilder:validation:Optional
+	LogGroupID *string `json:"logGroupId" tf:"log_group_id,omitempty"`
+}
+
+type ManagementEventsFilterInitParameters struct {
+
+	// Structure describing that events will be gathered from the specified resource.
+	ResourceScope []ManagementEventsFilterResourceScopeInitParameters `json:"resourceScope,omitempty" tf:"resource_scope,omitempty"`
+}
+
+type ManagementEventsFilterObservation struct {
+
+	// Structure describing that events will be gathered from the specified resource.
+	ResourceScope []ManagementEventsFilterResourceScopeObservation `json:"resourceScope,omitempty" tf:"resource_scope,omitempty"`
+}
+
+type ManagementEventsFilterParameters struct {
+
+	// Structure describing that events will be gathered from the specified resource.
+	// +kubebuilder:validation:Optional
+	ResourceScope []ManagementEventsFilterResourceScopeParameters `json:"resourceScope" tf:"resource_scope,omitempty"`
+}
+
+type ManagementEventsFilterResourceScopeInitParameters struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
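These nested types compose as filtering policy → management events filter → resource scope. A hedged in-package sketch of building one; the function name, folder ID, and the resource-type string are placeholders for illustration, not validated values:

// examplePolicy is a sketch: gather management events for one folder.
func examplePolicy() FilteringPolicyParameters {
	folderID := "b1g-example-folder"              // placeholder ID
	folderType := "resource-manager.folder"       // assumed resource type string
	return FilteringPolicyParameters{
		ManagementEventsFilter: []ManagementEventsFilterParameters{{
			ResourceScope: []ManagementEventsFilterResourceScopeParameters{{
				ResourceID:   &folderID,
				ResourceType: &folderType,
			}},
		}},
	}
}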
+
+type ManagementEventsFilterResourceScopeObservation struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type ManagementEventsFilterResourceScopeParameters struct {
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type PathFilterAnyFilterInitParameters struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type PathFilterAnyFilterObservation struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type PathFilterAnyFilterParameters struct {
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type PathFilterInitParameters struct {
+
+	// Structure describing that events will be gathered from all cloud resources that belong to the parent resource. Mutually exclusive with some_filter.
+	AnyFilter []AnyFilterInitParameters `json:"anyFilter,omitempty" tf:"any_filter,omitempty"`
+
+	// Structure describing that events will be gathered from some of the cloud resources that belong to the parent resource. Mutually exclusive with any_filter.
+	SomeFilter []SomeFilterInitParameters `json:"someFilter,omitempty" tf:"some_filter,omitempty"`
+}
+
+type PathFilterObservation struct {
+
+	// Structure describing that events will be gathered from all cloud resources that belong to the parent resource. Mutually exclusive with some_filter.
+	AnyFilter []AnyFilterObservation `json:"anyFilter,omitempty" tf:"any_filter,omitempty"`
+
+	// Structure describing that events will be gathered from some of the cloud resources that belong to the parent resource. Mutually exclusive with any_filter.
+	SomeFilter []SomeFilterObservation `json:"someFilter,omitempty" tf:"some_filter,omitempty"`
+}
+
+type PathFilterParameters struct {
+
+	// Structure describing that events will be gathered from all cloud resources that belong to the parent resource. Mutually exclusive with some_filter.
+	// +kubebuilder:validation:Optional
+	AnyFilter []AnyFilterParameters `json:"anyFilter,omitempty" tf:"any_filter,omitempty"`
+
+	// Structure describing that events will be gathered from some of the cloud resources that belong to the parent resource. Mutually exclusive with any_filter.
+	// +kubebuilder:validation:Optional
+	SomeFilter []SomeFilterParameters `json:"someFilter,omitempty" tf:"some_filter,omitempty"`
+}
+
+type PathFilterSomeFilterInitParameters struct {
+
+	// List of child resources from which events will be gathered.
+	AnyFilters []SomeFilterAnyFiltersInitParameters `json:"anyFilters,omitempty" tf:"any_filters,omitempty"`
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type PathFilterSomeFilterObservation struct {
+
+	// List of child resources from which events will be gathered.
+	AnyFilters []SomeFilterAnyFiltersObservation `json:"anyFilters,omitempty" tf:"any_filters,omitempty"`
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type PathFilterSomeFilterParameters struct {
+
+	// List of child resources from which events will be gathered.
+	// +kubebuilder:validation:Optional
+	AnyFilters []SomeFilterAnyFiltersParameters `json:"anyFilters" tf:"any_filters,omitempty"`
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type ResourceScopeInitParameters struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type ResourceScopeObservation struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type ResourceScopeParameters struct {
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type SomeFilterAnyFiltersInitParameters struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type SomeFilterAnyFiltersObservation struct {
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type SomeFilterAnyFiltersParameters struct {
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type SomeFilterInitParameters struct {
+
+	// List of child resources from which events will be gathered.
+	AnyFilters []AnyFiltersInitParameters `json:"anyFilters,omitempty" tf:"any_filters,omitempty"`
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type SomeFilterObservation struct {
+
+	// List of child resources from which events will be gathered.
+	AnyFilters []AnyFiltersObservation `json:"anyFilters,omitempty" tf:"any_filters,omitempty"`
+
+	// ID of the parent resource.
+	ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"`
+}
+
+type SomeFilterParameters struct {
+
+	// List of child resources from which events will be gathered.
+	// +kubebuilder:validation:Optional
+	AnyFilters []AnyFiltersParameters `json:"anyFilters" tf:"any_filters,omitempty"`
+
+	// ID of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"`
+
+	// Resource type of the parent resource.
+	// +kubebuilder:validation:Optional
+	ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"`
+}
+
+type StorageDestinationInitParameters struct {
+
+	// Name of the destination bucket.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// Additional prefix of the uploaded objects. If not specified, objects will be uploaded with prefix equal to trail_id.
+	ObjectPrefix *string `json:"objectPrefix,omitempty" tf:"object_prefix,omitempty"`
+}
+
+type StorageDestinationObservation struct {
+
+	// Name of the destination bucket.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// Additional prefix of the uploaded objects. If not specified, objects will be uploaded with prefix equal to trail_id.
+	ObjectPrefix *string `json:"objectPrefix,omitempty" tf:"object_prefix,omitempty"`
+}
+
+type StorageDestinationParameters struct {
+
+	// Name of the destination bucket.
+	// +kubebuilder:validation:Optional
+	BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"`
+
+	// Additional prefix of the uploaded objects. If not specified, objects will be uploaded with prefix equal to trail_id.
+	// +kubebuilder:validation:Optional
+	ObjectPrefix *string `json:"objectPrefix,omitempty" tf:"object_prefix,omitempty"`
+}
+
+type TrailsTrailInitParameters struct {
+
+	// Structure describing destination data stream of the trail. Mutually exclusive with logging_destination and storage_destination.
+	DataStreamDestination []DataStreamDestinationInitParameters `json:"dataStreamDestination,omitempty" tf:"data_stream_destination,omitempty"`
+
+	// Description of the trail.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Structure describing event filtering process for the trail.
+	Filter []FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"`
+
+	// Structure describing event filtering process for the trail. Mutually exclusive with filter. At least one of the management_events_filter or data_events_filter fields will be filled.
+	FilteringPolicy []FilteringPolicyInitParameters `json:"filteringPolicy,omitempty" tf:"filtering_policy,omitempty"`
+
+	// ID of the folder to which the trail belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels defined by the user.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Structure describing destination log group of the trail. Mutually exclusive with storage_destination and data_stream_destination.
+	LoggingDestination []LoggingDestinationInitParameters `json:"loggingDestination,omitempty" tf:"logging_destination,omitempty"`
+
+	// Name of the trail.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the IAM service account that is used by the trail.
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+	// Structure describing destination bucket of the trail. Mutually exclusive with logging_destination and data_stream_destination.
+	StorageDestination []StorageDestinationInitParameters `json:"storageDestination,omitempty" tf:"storage_destination,omitempty"`
+}
+
+type TrailsTrailObservation struct {
+
+	// Structure describing destination data stream of the trail. Mutually exclusive with logging_destination and storage_destination.
+	DataStreamDestination []DataStreamDestinationObservation `json:"dataStreamDestination,omitempty" tf:"data_stream_destination,omitempty"`
+
+	// Description of the trail.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Structure describing event filtering process for the trail.
+	Filter []FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"`
+
+	// Structure describing event filtering process for the trail. Mutually exclusive with filter. At least one of the management_events_filter or data_events_filter fields will be filled.
+	FilteringPolicy []FilteringPolicyObservation `json:"filteringPolicy,omitempty" tf:"filtering_policy,omitempty"`
+
+	// ID of the folder to which the trail belongs.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Labels defined by the user.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Structure describing destination log group of the trail. Mutually exclusive with storage_destination and data_stream_destination.
+	LoggingDestination []LoggingDestinationObservation `json:"loggingDestination,omitempty" tf:"logging_destination,omitempty"`
+
+	// Name of the trail.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the IAM service account that is used by the trail.
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+	// Status of this trail.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+	// Structure describing destination bucket of the trail. Mutually exclusive with logging_destination and data_stream_destination.
+	StorageDestination []StorageDestinationObservation `json:"storageDestination,omitempty" tf:"storage_destination,omitempty"`
+
+	// ID of the trail resource.
+	TrailID *string `json:"trailId,omitempty" tf:"trail_id,omitempty"`
+}
+
+type TrailsTrailParameters struct {
+
+	// Structure describing destination data stream of the trail. Mutually exclusive with logging_destination and storage_destination.
+	// +kubebuilder:validation:Optional
+	DataStreamDestination []DataStreamDestinationParameters `json:"dataStreamDestination,omitempty" tf:"data_stream_destination,omitempty"`
+
+	// Description of the trail.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Structure describing event filtering process for the trail.
+	// +kubebuilder:validation:Optional
+	Filter []FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"`
+
+	// Structure describing event filtering process for the trail. Mutually exclusive with filter. At least one of the management_events_filter or data_events_filter fields will be filled.
+	// +kubebuilder:validation:Optional
+	FilteringPolicy []FilteringPolicyParameters `json:"filteringPolicy,omitempty" tf:"filtering_policy,omitempty"`
+
+	// ID of the folder to which the trail belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels defined by the user.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Structure describing destination log group of the trail. Mutually exclusive with storage_destination and data_stream_destination.
+	// +kubebuilder:validation:Optional
+	LoggingDestination []LoggingDestinationParameters `json:"loggingDestination,omitempty" tf:"logging_destination,omitempty"`
+
+	// Name of the trail.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the IAM service account that is used by the trail.
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+	// Structure describing destination bucket of the trail. Mutually exclusive with logging_destination and data_stream_destination.
+	// +kubebuilder:validation:Optional
+	StorageDestination []StorageDestinationParameters `json:"storageDestination,omitempty" tf:"storage_destination,omitempty"`
+}
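The three destination blocks are mutually exclusive: a trail sends its events to exactly one of a bucket, a log group, or a data stream. A hedged in-package sketch of a complete parameter set wiring a bucket destination, reusing the examplePolicy sketch from above; every literal is a placeholder:

// exampleTrailParams is a sketch, not a validated configuration.
func exampleTrailParams() TrailsTrailParameters {
	name := "example-trail"
	saID := "aje-example-sa"        // placeholder service account ID
	bucket := "example-audit-logs"  // placeholder bucket name
	return TrailsTrailParameters{
		Name:             &name,
		ServiceAccountID: &saID,
		// storage_destination, logging_destination and
		// data_stream_destination may not be combined.
		StorageDestination: []StorageDestinationParameters{{
			BucketName: &bucket,
		}},
		FilteringPolicy: []FilteringPolicyParameters{examplePolicy()},
	}
}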
+
+// TrailsTrailSpec defines the desired state of TrailsTrail
+type TrailsTrailSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     TrailsTrailParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider TrailsTrailInitParameters `json:"initProvider,omitempty"`
+}
+
+// TrailsTrailStatus defines the observed state of TrailsTrail.
+type TrailsTrailStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        TrailsTrailObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// TrailsTrail is the Schema for the TrailsTrails API. Manages a trail resource.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type TrailsTrail struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceAccountId) || (has(self.initProvider) && has(self.initProvider.serviceAccountId))",message="spec.forProvider.serviceAccountId is a required parameter"
+	Spec   TrailsTrailSpec   `json:"spec"`
+	Status TrailsTrailStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// TrailsTrailList contains a list of TrailsTrails
+type TrailsTrailList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []TrailsTrail `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	TrailsTrail_Kind             = "TrailsTrail"
+	TrailsTrail_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: TrailsTrail_Kind}.String()
+	TrailsTrail_KindAPIVersion   = TrailsTrail_Kind + "." + CRDGroupVersion.String()
+	TrailsTrail_GroupVersionKind = CRDGroupVersion.WithKind(TrailsTrail_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&TrailsTrail{}, &TrailsTrailList{})
+}
diff --git a/apis/backup/v1alpha1/zz_generated.conversion_hubs.go b/apis/backup/v1alpha1/zz_generated.conversion_hubs.go
index 699abe1..18651b0 100755
--- a/apis/backup/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/backup/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,13 +1,9 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
+// Hub marks this type as a conversion hub.
+func (tr *Policy) Hub() {}
 
- // Hub marks this type as a conversion hub.
- func (tr *Policy) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *PolicyBindings) Hub() {}
-
+// Hub marks this type as a conversion hub.
+func (tr *PolicyBindings) Hub() {}
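These Hub() methods satisfy controller-runtime's conversion.Hub interface (runtime.Object plus Hub()), which marks this version as the one every other served version converts through. A compile-time sketch of the contract, assuming the standard import path:

// Sketch: assert the hub contract at compile time.
// import "sigs.k8s.io/controller-runtime/pkg/conversion"
var (
	_ conversion.Hub = &Policy{}
	_ conversion.Hub = &PolicyBindings{}
)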
diff --git a/apis/backup/v1alpha1/zz_generated.deepcopy.go b/apis/backup/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..af1f5d7
--- /dev/null
+++ b/apis/backup/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1778 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupSetsInitParameters) DeepCopyInto(out *BackupSetsInitParameters) {
+	*out = *in
+	if in.ExecuteByInterval != nil {
+		in, out := &in.ExecuteByInterval, &out.ExecuteByInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExecuteByTime != nil {
+		in, out := &in.ExecuteByTime, &out.ExecuteByTime
+		*out = make([]ExecuteByTimeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSetsInitParameters.
+func (in *BackupSetsInitParameters) DeepCopy() *BackupSetsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(BackupSetsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupSetsObservation) DeepCopyInto(out *BackupSetsObservation) {
+	*out = *in
+	if in.ExecuteByInterval != nil {
+		in, out := &in.ExecuteByInterval, &out.ExecuteByInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExecuteByTime != nil {
+		in, out := &in.ExecuteByTime, &out.ExecuteByTime
+		*out = make([]ExecuteByTimeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSetsObservation.
+func (in *BackupSetsObservation) DeepCopy() *BackupSetsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(BackupSetsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupSetsParameters) DeepCopyInto(out *BackupSetsParameters) {
+	*out = *in
+	if in.ExecuteByInterval != nil {
+		in, out := &in.ExecuteByInterval, &out.ExecuteByInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExecuteByTime != nil {
+		in, out := &in.ExecuteByTime, &out.ExecuteByTime
+		*out = make([]ExecuteByTimeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSetsParameters.
+func (in *BackupSetsParameters) DeepCopy() *BackupSetsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(BackupSetsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecuteByTimeInitParameters) DeepCopyInto(out *ExecuteByTimeInitParameters) {
+	*out = *in
+	if in.IncludeLastDayOfMonth != nil {
+		in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Monthdays != nil {
+		in, out := &in.Monthdays, &out.Monthdays
+		*out = make([]*float64, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(float64)
+				**out = **in
+			}
+		}
+	}
+	if in.Months != nil {
+		in, out := &in.Months, &out.Months
+		*out = make([]*float64, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(float64)
+				**out = **in
+			}
+		}
+	}
+	if in.RepeatAt != nil {
+		in, out := &in.RepeatAt, &out.RepeatAt
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.RepeatEvery != nil {
+		in, out := &in.RepeatEvery, &out.RepeatEvery
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Weekdays != nil {
+		in, out := &in.Weekdays, &out.Weekdays
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteByTimeInitParameters.
+func (in *ExecuteByTimeInitParameters) DeepCopy() *ExecuteByTimeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecuteByTimeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecuteByTimeObservation) DeepCopyInto(out *ExecuteByTimeObservation) {
+	*out = *in
+	if in.IncludeLastDayOfMonth != nil {
+		in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Monthdays != nil {
+		in, out := &in.Monthdays, &out.Monthdays
+		*out = make([]*float64, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(float64)
+				**out = **in
+			}
+		}
+	}
+	if in.Months != nil {
+		in, out := &in.Months, &out.Months
+		*out = make([]*float64, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(float64)
+				**out = **in
+			}
+		}
+	}
+	if in.RepeatAt != nil {
+		in, out := &in.RepeatAt, &out.RepeatAt
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.RepeatEvery != nil {
+		in, out := &in.RepeatEvery, &out.RepeatEvery
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Weekdays != nil {
+		in, out := &in.Weekdays, &out.Weekdays
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteByTimeObservation.
+func (in *ExecuteByTimeObservation) DeepCopy() *ExecuteByTimeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecuteByTimeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecuteByTimeParameters) DeepCopyInto(out *ExecuteByTimeParameters) {
+	*out = *in
+	if in.IncludeLastDayOfMonth != nil {
+		in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Monthdays != nil {
+		in, out := &in.Monthdays, &out.Monthdays
+		*out = make([]*float64, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(float64)
+				**out = **in
+			}
+		}
+	}
+	if in.Months != nil {
+		in, out := &in.Months, &out.Months
+		*out = make([]*float64, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(float64)
+				**out = **in
+			}
+		}
+	}
+	if in.RepeatAt != nil {
+		in, out := &in.RepeatAt, &out.RepeatAt
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.RepeatEvery != nil {
+		in, out := &in.RepeatEvery, &out.RepeatEvery
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Weekdays != nil {
+		in, out := &in.Weekdays, &out.Weekdays
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteByTimeParameters.
+func (in *ExecuteByTimeParameters) DeepCopy() *ExecuteByTimeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecuteByTimeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+	if in == nil {
+		return nil
+	}
+	out := new(Policy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyBindings) DeepCopyInto(out *PolicyBindings) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindings.
+func (in *PolicyBindings) DeepCopy() *PolicyBindings {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyBindings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PolicyBindings) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyBindingsInitParameters) DeepCopyInto(out *PolicyBindingsInitParameters) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindingsInitParameters. +func (in *PolicyBindingsInitParameters) DeepCopy() *PolicyBindingsInitParameters { + if in == nil { + return nil + } + out := new(PolicyBindingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyBindingsList) DeepCopyInto(out *PolicyBindingsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PolicyBindings, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindingsList. +func (in *PolicyBindingsList) DeepCopy() *PolicyBindingsList { + if in == nil { + return nil + } + out := new(PolicyBindingsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyBindingsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyBindingsObservation) DeepCopyInto(out *PolicyBindingsObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.Processing != nil { + in, out := &in.Processing, &out.Processing + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindingsObservation. +func (in *PolicyBindingsObservation) DeepCopy() *PolicyBindingsObservation { + if in == nil { + return nil + } + out := new(PolicyBindingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyBindingsParameters) DeepCopyInto(out *PolicyBindingsParameters) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindingsParameters. +func (in *PolicyBindingsParameters) DeepCopy() *PolicyBindingsParameters { + if in == nil { + return nil + } + out := new(PolicyBindingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyBindingsSpec) DeepCopyInto(out *PolicyBindingsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindingsSpec. +func (in *PolicyBindingsSpec) DeepCopy() *PolicyBindingsSpec { + if in == nil { + return nil + } + out := new(PolicyBindingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyBindingsStatus) DeepCopyInto(out *PolicyBindingsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyBindingsStatus. +func (in *PolicyBindingsStatus) DeepCopy() *PolicyBindingsStatus { + if in == nil { + return nil + } + out := new(PolicyBindingsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.ArchiveName != nil { + in, out := &in.ArchiveName, &out.ArchiveName + *out = new(string) + **out = **in + } + if in.Cbt != nil { + in, out := &in.Cbt, &out.Cbt + *out = new(string) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.FastBackupEnabled != nil { + in, out := &in.FastBackupEnabled, &out.FastBackupEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.MultiVolumeSnapshottingEnabled != nil { + in, out := &in.MultiVolumeSnapshottingEnabled, &out.MultiVolumeSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PerformanceWindowEnabled != nil { + in, out := &in.PerformanceWindowEnabled, &out.PerformanceWindowEnabled + *out = new(bool) + **out = **in + } + if in.PreserveFileSecuritySettings != nil { + in, out := &in.PreserveFileSecuritySettings, &out.PreserveFileSecuritySettings + *out = new(bool) + **out = **in + } + if in.QuiesceSnapshottingEnabled != nil { + in, out := &in.QuiesceSnapshottingEnabled, &out.QuiesceSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Reattempts != nil { + in, out := &in.Reattempts, &out.Reattempts + *out = make([]ReattemptsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = make([]RetentionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]SchedulingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SilentModeEnabled != nil { + in, out := &in.SilentModeEnabled, &out.SilentModeEnabled + *out = new(bool) + **out = **in + } + if in.SplittingBytes != nil { + in, out := &in.SplittingBytes, &out.SplittingBytes + *out = new(string) + **out = **in + } + if in.VMSnapshotReattempts != nil { + in, out := &in.VMSnapshotReattempts, &out.VMSnapshotReattempts + *out = make([]VMSnapshotReattemptsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VssProvider != nil { + in, out := &in.VssProvider, &out.VssProvider + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.ArchiveName != nil { + in, out := &in.ArchiveName, &out.ArchiveName + *out = new(string) + **out = **in + } + if in.Cbt != nil { + in, out := &in.Cbt, &out.Cbt + *out = new(string) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FastBackupEnabled != nil { + in, out := &in.FastBackupEnabled, &out.FastBackupEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MultiVolumeSnapshottingEnabled != nil { + in, out := &in.MultiVolumeSnapshottingEnabled, &out.MultiVolumeSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PerformanceWindowEnabled != nil { + in, out := &in.PerformanceWindowEnabled, &out.PerformanceWindowEnabled + *out = new(bool) + **out = **in + } + if in.PreserveFileSecuritySettings != nil { + in, out := &in.PreserveFileSecuritySettings, &out.PreserveFileSecuritySettings + *out = new(bool) + **out = **in + } + if in.QuiesceSnapshottingEnabled != nil { + in, out := &in.QuiesceSnapshottingEnabled, &out.QuiesceSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Reattempts != nil { + in, out := &in.Reattempts, &out.Reattempts + *out = make([]ReattemptsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = make([]RetentionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]SchedulingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SilentModeEnabled != nil { + in, out := &in.SilentModeEnabled, &out.SilentModeEnabled + *out = new(bool) + **out = **in + } + if in.SplittingBytes != nil { + in, out := &in.SplittingBytes, &out.SplittingBytes + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := 
&in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } + if in.VMSnapshotReattempts != nil { + in, out := &in.VMSnapshotReattempts, &out.VMSnapshotReattempts + *out = make([]VMSnapshotReattemptsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VssProvider != nil { + in, out := &in.VssProvider, &out.VssProvider + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.ArchiveName != nil { + in, out := &in.ArchiveName, &out.ArchiveName + *out = new(string) + **out = **in + } + if in.Cbt != nil { + in, out := &in.Cbt, &out.Cbt + *out = new(string) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.FastBackupEnabled != nil { + in, out := &in.FastBackupEnabled, &out.FastBackupEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.MultiVolumeSnapshottingEnabled != nil { + in, out := &in.MultiVolumeSnapshottingEnabled, &out.MultiVolumeSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PerformanceWindowEnabled != nil { + in, out := &in.PerformanceWindowEnabled, &out.PerformanceWindowEnabled + *out = new(bool) + **out = **in + } + if in.PreserveFileSecuritySettings != nil { + in, out := &in.PreserveFileSecuritySettings, &out.PreserveFileSecuritySettings + *out = new(bool) + **out = **in + } + if in.QuiesceSnapshottingEnabled != nil { + in, out := &in.QuiesceSnapshottingEnabled, &out.QuiesceSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Reattempts != nil { + in, out := &in.Reattempts, &out.Reattempts + *out = make([]ReattemptsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = make([]RetentionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]SchedulingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SilentModeEnabled != nil { + in, out := &in.SilentModeEnabled, &out.SilentModeEnabled + *out = new(bool) + **out = **in + } + if in.SplittingBytes != nil { + in, out := &in.SplittingBytes, &out.SplittingBytes + *out = new(string) + **out = **in + } + if in.VMSnapshotReattempts != nil { + in, out := &in.VMSnapshotReattempts, &out.VMSnapshotReattempts + *out = make([]VMSnapshotReattemptsParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VssProvider != nil { + in, out := &in.VssProvider, &out.VssProvider + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { + if in == nil { + return nil + } + out := new(PolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReattemptsInitParameters) DeepCopyInto(out *ReattemptsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReattemptsInitParameters. +func (in *ReattemptsInitParameters) DeepCopy() *ReattemptsInitParameters { + if in == nil { + return nil + } + out := new(ReattemptsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReattemptsObservation) DeepCopyInto(out *ReattemptsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReattemptsObservation. +func (in *ReattemptsObservation) DeepCopy() *ReattemptsObservation { + if in == nil { + return nil + } + out := new(ReattemptsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReattemptsParameters) DeepCopyInto(out *ReattemptsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReattemptsParameters. +func (in *ReattemptsParameters) DeepCopy() *ReattemptsParameters { + if in == nil { + return nil + } + out := new(ReattemptsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionInitParameters) DeepCopyInto(out *RetentionInitParameters) { + *out = *in + if in.AfterBackup != nil { + in, out := &in.AfterBackup, &out.AfterBackup + *out = new(bool) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionInitParameters. +func (in *RetentionInitParameters) DeepCopy() *RetentionInitParameters { + if in == nil { + return nil + } + out := new(RetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionObservation) DeepCopyInto(out *RetentionObservation) { + *out = *in + if in.AfterBackup != nil { + in, out := &in.AfterBackup, &out.AfterBackup + *out = new(bool) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionObservation. +func (in *RetentionObservation) DeepCopy() *RetentionObservation { + if in == nil { + return nil + } + out := new(RetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionParameters) DeepCopyInto(out *RetentionParameters) { + *out = *in + if in.AfterBackup != nil { + in, out := &in.AfterBackup, &out.AfterBackup + *out = new(bool) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionParameters. +func (in *RetentionParameters) DeepCopy() *RetentionParameters { + if in == nil { + return nil + } + out := new(RetentionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { + *out = *in + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(string) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.RepeatPeriod != nil { + in, out := &in.RepeatPeriod, &out.RepeatPeriod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesInitParameters. +func (in *RulesInitParameters) DeepCopy() *RulesInitParameters { + if in == nil { + return nil + } + out := new(RulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { + *out = *in + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(string) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.RepeatPeriod != nil { + in, out := &in.RepeatPeriod, &out.RepeatPeriod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesObservation. +func (in *RulesObservation) DeepCopy() *RulesObservation { + if in == nil { + return nil + } + out := new(RulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { + *out = *in + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(string) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.RepeatPeriod != nil { + in, out := &in.RepeatPeriod, &out.RepeatPeriod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesParameters. +func (in *RulesParameters) DeepCopy() *RulesParameters { + if in == nil { + return nil + } + out := new(RulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulingExecuteByTimeInitParameters) DeepCopyInto(out *SchedulingExecuteByTimeInitParameters) { + *out = *in + if in.IncludeLastDayOfMonth != nil { + in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth + *out = new(bool) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RepeatAt != nil { + in, out := &in.RepeatAt, &out.RepeatAt + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepeatEvery != nil { + in, out := &in.RepeatEvery, &out.RepeatEvery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingExecuteByTimeInitParameters. +func (in *SchedulingExecuteByTimeInitParameters) DeepCopy() *SchedulingExecuteByTimeInitParameters { + if in == nil { + return nil + } + out := new(SchedulingExecuteByTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingExecuteByTimeObservation) DeepCopyInto(out *SchedulingExecuteByTimeObservation) { + *out = *in + if in.IncludeLastDayOfMonth != nil { + in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth + *out = new(bool) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RepeatAt != nil { + in, out := &in.RepeatAt, &out.RepeatAt + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepeatEvery != nil { + in, out := &in.RepeatEvery, &out.RepeatEvery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingExecuteByTimeObservation. 
+func (in *SchedulingExecuteByTimeObservation) DeepCopy() *SchedulingExecuteByTimeObservation { + if in == nil { + return nil + } + out := new(SchedulingExecuteByTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingExecuteByTimeParameters) DeepCopyInto(out *SchedulingExecuteByTimeParameters) { + *out = *in + if in.IncludeLastDayOfMonth != nil { + in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth + *out = new(bool) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RepeatAt != nil { + in, out := &in.RepeatAt, &out.RepeatAt + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepeatEvery != nil { + in, out := &in.RepeatEvery, &out.RepeatEvery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingExecuteByTimeParameters. +func (in *SchedulingExecuteByTimeParameters) DeepCopy() *SchedulingExecuteByTimeParameters { + if in == nil { + return nil + } + out := new(SchedulingExecuteByTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingInitParameters) DeepCopyInto(out *SchedulingInitParameters) { + *out = *in + if in.BackupSets != nil { + in, out := &in.BackupSets, &out.BackupSets + *out = make([]BackupSetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExecuteByInterval != nil { + in, out := &in.ExecuteByInterval, &out.ExecuteByInterval + *out = new(float64) + **out = **in + } + if in.ExecuteByTime != nil { + in, out := &in.ExecuteByTime, &out.ExecuteByTime + *out = make([]SchedulingExecuteByTimeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxParallelBackups != nil { + in, out := &in.MaxParallelBackups, &out.MaxParallelBackups + *out = new(float64) + **out = **in + } + if in.RandomMaxDelay != nil { + in, out := &in.RandomMaxDelay, &out.RandomMaxDelay + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.WeeklyBackupDay != nil { + in, out := &in.WeeklyBackupDay, &out.WeeklyBackupDay + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingInitParameters. 
+func (in *SchedulingInitParameters) DeepCopy() *SchedulingInitParameters { + if in == nil { + return nil + } + out := new(SchedulingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingObservation) DeepCopyInto(out *SchedulingObservation) { + *out = *in + if in.BackupSets != nil { + in, out := &in.BackupSets, &out.BackupSets + *out = make([]BackupSetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExecuteByInterval != nil { + in, out := &in.ExecuteByInterval, &out.ExecuteByInterval + *out = new(float64) + **out = **in + } + if in.ExecuteByTime != nil { + in, out := &in.ExecuteByTime, &out.ExecuteByTime + *out = make([]SchedulingExecuteByTimeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxParallelBackups != nil { + in, out := &in.MaxParallelBackups, &out.MaxParallelBackups + *out = new(float64) + **out = **in + } + if in.RandomMaxDelay != nil { + in, out := &in.RandomMaxDelay, &out.RandomMaxDelay + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.WeeklyBackupDay != nil { + in, out := &in.WeeklyBackupDay, &out.WeeklyBackupDay + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingObservation. +func (in *SchedulingObservation) DeepCopy() *SchedulingObservation { + if in == nil { + return nil + } + out := new(SchedulingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingParameters) DeepCopyInto(out *SchedulingParameters) { + *out = *in + if in.BackupSets != nil { + in, out := &in.BackupSets, &out.BackupSets + *out = make([]BackupSetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExecuteByInterval != nil { + in, out := &in.ExecuteByInterval, &out.ExecuteByInterval + *out = new(float64) + **out = **in + } + if in.ExecuteByTime != nil { + in, out := &in.ExecuteByTime, &out.ExecuteByTime + *out = make([]SchedulingExecuteByTimeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxParallelBackups != nil { + in, out := &in.MaxParallelBackups, &out.MaxParallelBackups + *out = new(float64) + **out = **in + } + if in.RandomMaxDelay != nil { + in, out := &in.RandomMaxDelay, &out.RandomMaxDelay + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.WeeklyBackupDay != nil { + in, out := &in.WeeklyBackupDay, &out.WeeklyBackupDay + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingParameters. +func (in *SchedulingParameters) DeepCopy() *SchedulingParameters { + if in == nil { + return nil + } + out := new(SchedulingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VMSnapshotReattemptsInitParameters) DeepCopyInto(out *VMSnapshotReattemptsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSnapshotReattemptsInitParameters. +func (in *VMSnapshotReattemptsInitParameters) DeepCopy() *VMSnapshotReattemptsInitParameters { + if in == nil { + return nil + } + out := new(VMSnapshotReattemptsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSnapshotReattemptsObservation) DeepCopyInto(out *VMSnapshotReattemptsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSnapshotReattemptsObservation. +func (in *VMSnapshotReattemptsObservation) DeepCopy() *VMSnapshotReattemptsObservation { + if in == nil { + return nil + } + out := new(VMSnapshotReattemptsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSnapshotReattemptsParameters) DeepCopyInto(out *VMSnapshotReattemptsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSnapshotReattemptsParameters. +func (in *VMSnapshotReattemptsParameters) DeepCopy() *VMSnapshotReattemptsParameters { + if in == nil { + return nil + } + out := new(VMSnapshotReattemptsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/backup/v1alpha1/zz_generated.resolvers.go b/apis/backup/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..f1adc77 --- /dev/null +++ b/apis/backup/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,97 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Policy. 
+func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this PolicyBindings. +func (mg *PolicyBindings) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{ + List: &v1alpha11.InstanceList{}, + Managed: &v1alpha11.Instance{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InstanceIDRef, + Selector: mg.Spec.InitProvider.InstanceIDSelector, + To: reference.To{ + List: &v1alpha11.InstanceList{}, + Managed: &v1alpha11.Instance{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID") + } + mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/backup/v1alpha1/zz_groupversion_info.go b/apis/backup/v1alpha1/zz_groupversion_info.go index 1ba9cb3..e41e135 100755 --- a/apis/backup/v1alpha1/zz_groupversion_info.go +++ b/apis/backup/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/backup/v1alpha1/zz_policy_terraformed.go b/apis/backup/v1alpha1/zz_policy_terraformed.go index 40b6888..2ce4736 100755 --- a/apis/backup/v1alpha1/zz_policy_terraformed.go +++ b/apis/backup/v1alpha1/zz_policy_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Policy func (mg *Policy) GetTerraformResourceType() string { - return "yandex_backup_policy" + return "yandex_backup_policy" } // GetConnectionDetailsMapping for this Policy func (tr *Policy) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Policy func (tr *Policy) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Policy func (tr *Policy) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Policy func (tr *Policy) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Policy func (tr *Policy) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Policy func (tr *Policy) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Policy func (tr *Policy) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Policy func (tr *Policy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets
- overwrite to true, we need to set it back to false, we don't
- want to overwrite the forProvider fields with the initProvider
- fields.
- err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
- c.Overwrite = false
- })
- if err != nil {
- return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
- }
-
- return params, nil
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
}

// LateInitialize this Policy using its observed tfState.
// returns True if there are any spec changes for the resource.
func (tr *Policy) LateInitialize(attrs []byte) (bool, error) {
- params := &PolicyParameters{}
- if err := json.TFParser.Unmarshal(attrs, params); err != nil {
- return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
- }
- opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
- li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params)
+ params := &PolicyParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params)
}

// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *Policy) GetTerraformSchemaVersion() int {
- return 0
+ return 0
}
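The GetMergedParameters body above is the one place this file reconciles spec.forProvider with spec.initProvider, and the Note(lsviben) comment is the key detail: mergo.WithSliceDeepCopy implicitly turns Overwrite on, so the extra option function turns it back off to keep forProvider authoritative. A minimal standalone sketch of that merge behavior, assuming the github.com/imdario/mergo module path (upjet-generated files import mergo, but the exact path depends on the provider's go.mod) and with purely illustrative map keys:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// Stand-ins for tr.GetParameters() and tr.GetInitParameters();
	// the keys and values here are hypothetical.
	params := map[string]any{"name": "from-for-provider"}
	initParams := map[string]any{"name": "from-init-provider", "folder_id": "example-folder"}

	// Same option set as the generated code: deep-copy slices, then
	// force Overwrite back to false so existing params entries win.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(params) // map[folder_id:example-folder name:from-for-provider]
}

Because Overwrite ends up false, initProvider entries only land where params has no value, so spec.initProvider behaves as a set of creation-time defaults rather than overrides.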
diff --git a/apis/backup/v1alpha1/zz_policy_types.go b/apis/backup/v1alpha1/zz_policy_types.go
index 60e8bd0..a1c0f7e 100755
--- a/apis/backup/v1alpha1/zz_policy_types.go
+++ b/apis/backup/v1alpha1/zz_policy_types.go
@@ -1,5 +1,3 @@
-
-
// Code generated by upjet. DO NOT EDIT.

package v1alpha1

@@ -9,718 +7,661 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
-
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
)
-
-
-
type BackupSetsInitParameters struct {
+ // — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`

-// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
-ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`

+ // — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByTime []ExecuteByTimeInitParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`

-// — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
-ExecuteByTime []ExecuteByTimeInitParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
-
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-Type *string `json:"type,omitempty" tf:"type,omitempty"`

+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
}
-
type BackupSetsObservation struct {
+ // — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`

-// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
-ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`

+ // — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByTime []ExecuteByTimeObservation `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`

-// — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
-ExecuteByTime []ExecuteByTimeObservation `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
-
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-Type *string `json:"type,omitempty" tf:"type,omitempty"`

+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
}
-
type BackupSetsParameters struct {
+ // — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ // +kubebuilder:validation:Optional
+ ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`

-// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
-// +kubebuilder:validation:Optional
-ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`

+ // — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ // +kubebuilder:validation:Optional
+ ExecuteByTime []ExecuteByTimeParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`

-// — Perform backup periodically at specific time.
-// +kubebuilder:validation:Optional
-ExecuteByTime []ExecuteByTimeParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
-
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-// +kubebuilder:validation:Optional
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ // +kubebuilder:validation:Optional
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
}

-
type ExecuteByTimeInitParameters struct {
+ // — If true, schedule will be applied on the last day of month. See day_type for available values.
+ IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-// — If true, schedule will be applied on the last day of month. See day_type for available values.
-IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
+ // — List of days when schedule applies. Used in "MONTHLY" type.
+ Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
-// — List of days when schedule applies. Used in "MONTHLY" type.
-Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
+ // — seconds
+ Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
-// — seconds
-Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
+ // hours format), when the schedule applies.
+ RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
-// hours format), when the schedule applies.
-RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+ // — Frequency of backup repetition. See interval_type for available values.
+ RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
-// — Frequency of backup repetition. See interval_type for available values.
-RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
-
-// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
-Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+ // — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+ Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
}

-
type ExecuteByTimeObservation struct {
+ // — If true, schedule will be applied on the last day of month. See day_type for available values.
+ IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-// — If true, schedule will be applied on the last day of month. See day_type for available values.
-IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-
-// — List of days when schedule applies. Used in "MONTHLY" type.
-Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
+ // — List of days when schedule applies. Used in "MONTHLY" type.
+ Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
-// — seconds
-Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
+ // — seconds
+ Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
-// hours format), when the schedule applies.
-RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+ // hours format), when the schedule applies.
+ RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
-// — Frequency of backup repetition. See interval_type for available values.
-RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+ // — Frequency of backup repetition. See interval_type for available values.
+ RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
-Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+ // — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+ Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
}

-
type ExecuteByTimeParameters struct {
+ // — If true, schedule will be applied on the last day of month. See day_type for available values.
+ // +kubebuilder:validation:Optional
+ IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-// — If true, schedule will be applied on the last day of month. See day_type for available values.
-// +kubebuilder:validation:Optional
-IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-
-// — List of days when schedule applies. Used in "MONTHLY" type.
-// +kubebuilder:validation:Optional
-Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
+ // — List of days when schedule applies. Used in "MONTHLY" type.
+ // +kubebuilder:validation:Optional
+ Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
-// — seconds
-// +kubebuilder:validation:Optional
-Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
+ // — seconds
+ // +kubebuilder:validation:Optional
+ Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
-// hours format), when the schedule applies.
-// +kubebuilder:validation:Optional
-RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+ // hours format), when the schedule applies.
+ // +kubebuilder:validation:Optional
+ RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
-// — Frequency of backup repetition. See interval_type for available values.
-// +kubebuilder:validation:Optional
-RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+ // — Frequency of backup repetition. See interval_type for available values.
+ // +kubebuilder:validation:Optional
+ RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ // +kubebuilder:validation:Optional
+ Type *string `json:"type" tf:"type,omitempty"`
-// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
-// +kubebuilder:validation:Optional
-Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+ // — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+ // +kubebuilder:validation:Optional
+ Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
}

-
type PolicyInitParameters struct {
+ // [Plan ID]-[Unique ID]a) — The name of generated archives.
+ ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
-// [Plan ID]-[Unique ID]a) — The name of generated archives.
-ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
-
-// — Configuration of Changed Block Tracking. Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
-Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
+ // — Configuration of Changed Block Tracking. Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
+ Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
-// — Archive compression level. Affects CPU. Available values: "NORMAL", "HIGH", "MAX", "OFF".
-Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
+ // — Archive compression level. Affects CPU. Available values: "NORMAL", "HIGH", "MAX", "OFF".
+ Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
-// — Enable flag
-FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
+ // — Enable flag
+ FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
-// — days
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+ // — days
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// — Format of the backup. It's strongly recommend to leave this option empty or "AUTO". Available values: "AUTO", "VERSION_11", "VERSION_12".
-Format *string `json:"format,omitempty" tf:"format,omitempty"`
+ // — Format of the backup. It's strongly recommended to leave this option empty or "AUTO". Available values: "AUTO", "VERSION_11", "VERSION_12".
+ Format *string `json:"format,omitempty" tf:"format,omitempty"`
-// — If true, snapshots of multiple volumes will be taken simultaneously.
-MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
+ // — If true, snapshots of multiple volumes will be taken simultaneously.
+ MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
-// — Name of the policy
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // — Name of the policy
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// — Time windows for performance limitations of backup.
-PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
+ // — Time windows for performance limitations of backup.
+ PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
-// — Preserves file security settings. It's better to set this option to true.
-PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
+ // — Preserves file security settings. It's better to set this option to true.
+ PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
-// — If true, a quiesced snapshot of the virtual machine will be taken.
-QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
+ // — If true, a quiesced snapshot of the virtual machine will be taken.
+ QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
-// — Amount of reattempts that should be performed while trying to make backup at the host. This attribute consists of the following parameters:
-Reattempts []ReattemptsInitParameters `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
+ // — Amount of reattempts that should be performed while trying to make backup at the host. This attribute consists of the following parameters:
+ Reattempts []ReattemptsInitParameters `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
-// — Retention policy for backups. Allows to setup backups lifecycle. This attribute consists of the following parameters:
-Retention []RetentionInitParameters `json:"retention,omitempty" tf:"retention,omitempty"`
+ // — Retention policy for backups. Allows to set up the backups lifecycle. This attribute consists of the following parameters:
+ Retention []RetentionInitParameters `json:"retention,omitempty" tf:"retention,omitempty"`
-// — Schedule settings for creating backups on the host.
-Scheduling []SchedulingInitParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
+ // — Schedule settings for creating backups on the host.
+ Scheduling []SchedulingInitParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
-// — if true, a user interaction will be avoided when possible.
-SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
+ // — If true, a user interaction will be avoided when possible.
+ SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
-// — determines the size to split backups. It's better to leave this option unchanged.
-SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
+ // — Determines the size to split backups. It's better to leave this option unchanged.
+ SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
-// (Requied) — Amount of reattempts that should be performed while trying to make snapshot. This attribute consists of the following parameters:
-VMSnapshotReattempts []VMSnapshotReattemptsInitParameters `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
+ // (Required) — Amount of reattempts that should be performed while trying to make snapshot. This attribute consists of the following parameters:
+ VMSnapshotReattempts []VMSnapshotReattemptsInitParameters `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
-// — Settings for the volume shadow copy service. Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
-VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
+ // — Settings for the volume shadow copy service. Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
+ VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
}

-
type PolicyObservation struct {
+ // [Plan ID]-[Unique ID]a) — The name of generated archives.
+ ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
-// [Plan ID]-[Unique ID]a) — The name of generated archives.
-ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
-
-// — Configuration of Changed Block Tracking. Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
-Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
+ // — Configuration of Changed Block Tracking. Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
+ Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
-// — Archive compression level. Affects CPU. Available values: "NORMAL", "HIGH", "MAX", "OFF".
-Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
+ // — Archive compression level. Affects CPU. Available values: "NORMAL", "HIGH", "MAX", "OFF".
+ Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+ CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
+ // — Enable flag
+ FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
-// — days
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+ // — days
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// — Format of the backup. It's strongly recommend to leave this option empty or "AUTO". Available values: "AUTO", "VERSION_11", "VERSION_12".
-Format *string `json:"format,omitempty" tf:"format,omitempty"`
+ // — Format of the backup. It's strongly recommended to leave this option empty or "AUTO". Available values: "AUTO", "VERSION_11", "VERSION_12".
+ Format *string `json:"format,omitempty" tf:"format,omitempty"`
-// — days
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+ // — days
+ ID *string `json:"id,omitempty" tf:"id,omitempty"`
-// — If true, snapshots of multiple volumes will be taken simultaneously.
-MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
+ // — If true, snapshots of multiple volumes will be taken simultaneously.
+ MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
-// — Name of the policy
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // — Name of the policy
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// — Time windows for performance limitations of backup.
-PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
+ // — Time windows for performance limitations of backup.
+ PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
-// — Preserves file security settings. It's better to set this option to true.
-PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
+ // — Preserves file security settings. It's better to set this option to true.
+ PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
-// — If true, a quiesced snapshot of the virtual machine will be taken.
-QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
+ // — If true, a quiesced snapshot of the virtual machine will be taken.
+ QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
-// — Amount of reattempts that should be performed while trying to make backup at the host. This attribute consists of the following parameters:
-Reattempts []ReattemptsObservation `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
+ // — Amount of reattempts that should be performed while trying to make backup at the host. This attribute consists of the following parameters:
+ Reattempts []ReattemptsObservation `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
-// — Retention policy for backups. Allows to setup backups lifecycle. This attribute consists of the following parameters:
-Retention []RetentionObservation `json:"retention,omitempty" tf:"retention,omitempty"`
+ // — Retention policy for backups. Allows to set up the backups lifecycle. This attribute consists of the following parameters:
+ Retention []RetentionObservation `json:"retention,omitempty" tf:"retention,omitempty"`
-// — Schedule settings for creating backups on the host.
-Scheduling []SchedulingObservation `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
+ // — Schedule settings for creating backups on the host.
+ Scheduling []SchedulingObservation `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
-// — if true, a user interaction will be avoided when possible.
-SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
+ // — If true, a user interaction will be avoided when possible.
+ SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
-// — determines the size to split backups. It's better to leave this option unchanged.
-SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
+ // — Determines the size to split backups. It's better to leave this option unchanged.
+ SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
-UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+ UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
-// (Requied) — Amount of reattempts that should be performed while trying to make snapshot. This attribute consists of the following parameters:
-VMSnapshotReattempts []VMSnapshotReattemptsObservation `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
+ // (Required) — Amount of reattempts that should be performed while trying to make snapshot. This attribute consists of the following parameters:
+ VMSnapshotReattempts []VMSnapshotReattemptsObservation `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
-// — Settings for the volume shadow copy service. Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
-VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
+ // — Settings for the volume shadow copy service. Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
+ VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
}

-
type PolicyParameters struct {
+ // [Plan ID]-[Unique ID]a) — The name of generated archives.
+ // +kubebuilder:validation:Optional
+ ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
-// [Plan ID]-[Unique ID]a) — The name of generated archives.
-// +kubebuilder:validation:Optional
-ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
-
-// — Configuration of Changed Block Tracking. Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
-// +kubebuilder:validation:Optional
-Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
+ // — Configuration of Changed Block Tracking. Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
+ // +kubebuilder:validation:Optional
+ Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
-// — Archive compression level. Affects CPU. Available values: "NORMAL", "HIGH", "MAX", "OFF".
-// +kubebuilder:validation:Optional
-Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
+ // — Archive compression level. Affects CPU. Available values: "NORMAL", "HIGH", "MAX", "OFF".
+ // +kubebuilder:validation:Optional
+ Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
-// — Enable flag
-// +kubebuilder:validation:Optional
-FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
+ // — Enable flag
+ // +kubebuilder:validation:Optional
+ FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
-// — days
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+ // — days
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// — Format of the backup. It's strongly recommend to leave this option empty or "AUTO". Available values: "AUTO", "VERSION_11", "VERSION_12".
-// +kubebuilder:validation:Optional
-Format *string `json:"format,omitempty" tf:"format,omitempty"`
+ // — Format of the backup. It's strongly recommended to leave this option empty or "AUTO". Available values: "AUTO", "VERSION_11", "VERSION_12".
+ // +kubebuilder:validation:Optional
+ Format *string `json:"format,omitempty" tf:"format,omitempty"`
-// — If true, snapshots of multiple volumes will be taken simultaneously.
-// +kubebuilder:validation:Optional
-MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
+ // — If true, snapshots of multiple volumes will be taken simultaneously.
+ // +kubebuilder:validation:Optional
+ MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
-// — Name of the policy
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // — Name of the policy
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// — Time windows for performance limitations of backup.
-// +kubebuilder:validation:Optional
-PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
+ // — Time windows for performance limitations of backup.
+ // +kubebuilder:validation:Optional
+ PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
-// — Preserves file security settings. It's better to set this option to true.
-// +kubebuilder:validation:Optional
-PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
+ // — Preserves file security settings. It's better to set this option to true.
+ // +kubebuilder:validation:Optional
+ PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
-// — If true, a quiesced snapshot of the virtual machine will be taken.
-// +kubebuilder:validation:Optional
-QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
+ // — If true, a quiesced snapshot of the virtual machine will be taken.
+ // +kubebuilder:validation:Optional
+ QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
-// — Amount of reattempts that should be performed while trying to make backup at the host. This attribute consists of the following parameters:
-// +kubebuilder:validation:Optional
-Reattempts []ReattemptsParameters `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
+ // — Amount of reattempts that should be performed while trying to make backup at the host. This attribute consists of the following parameters:
+ // +kubebuilder:validation:Optional
+ Reattempts []ReattemptsParameters `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
-// — Retention policy for backups. Allows to setup backups lifecycle. This attribute consists of the following parameters:
-// +kubebuilder:validation:Optional
-Retention []RetentionParameters `json:"retention,omitempty" tf:"retention,omitempty"`
+ // — Retention policy for backups. Allows to set up the backups lifecycle. This attribute consists of the following parameters:
+ // +kubebuilder:validation:Optional
+ Retention []RetentionParameters `json:"retention,omitempty" tf:"retention,omitempty"`
-// — Schedule settings for creating backups on the host.
-// +kubebuilder:validation:Optional
-Scheduling []SchedulingParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
+ // — Schedule settings for creating backups on the host.
+ // +kubebuilder:validation:Optional
+ Scheduling []SchedulingParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
-// — if true, a user interaction will be avoided when possible.
-// +kubebuilder:validation:Optional
-SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
+ // — If true, a user interaction will be avoided when possible.
+ // +kubebuilder:validation:Optional
+ SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
-// — determines the size to split backups. It's better to leave this option unchanged.
-// +kubebuilder:validation:Optional
-SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
+ // — Determines the size to split backups. It's better to leave this option unchanged.
+ // +kubebuilder:validation:Optional
+ SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
-// (Requied) — Amount of reattempts that should be performed while trying to make snapshot. This attribute consists of the following parameters:
-// +kubebuilder:validation:Optional
-VMSnapshotReattempts []VMSnapshotReattemptsParameters `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
+ // (Required) — Amount of reattempts that should be performed while trying to make snapshot. This attribute consists of the following parameters:
+ // +kubebuilder:validation:Optional
+ VMSnapshotReattempts []VMSnapshotReattemptsParameters `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
-// — Settings for the volume shadow copy service. Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
-// +kubebuilder:validation:Optional
-VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
+ // — Settings for the volume shadow copy service. Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
+ // +kubebuilder:validation:Optional
+ VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
}
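Editor's note: for orientation, the sketch below shows how the required Policy blocks declared above fit together, with retention choosing max_age over max_count and scheduling using execute_by_time. It is hand-written illustration, not generated code; all concrete values are invented, and the generic ptr helper is an assumed convenience:

func ptr[T any](v T) *T { return &v }

// examplePolicyParameters wires up the blocks the CRD validation marks as
// required: name, reattempts, retention, scheduling, vmSnapshotReattempts.
func examplePolicyParameters() PolicyParameters {
	return PolicyParameters{
		Name: ptr("daily-backup"),
		Reattempts: []ReattemptsParameters{{
			Enabled:     ptr(true),
			Interval:    ptr("5m"), // assumed interval_type-style value
			MaxAttempts: ptr(float64(5)),
		}},
		Retention: []RetentionParameters{{
			AfterBackup: ptr(true),
			Rules: []RulesParameters{{
				MaxAge: ptr("365d"), // exactly one of max_age or max_count
			}},
		}},
		Scheduling: []SchedulingParameters{{
			Enabled: ptr(true),
			// exactly one of execute_by_interval or execute_by_time
			ExecuteByTime: []SchedulingExecuteByTimeParameters{{
				Type:     ptr("DAILY"),
				RepeatAt: []*string{ptr("03:00")},
			}},
		}},
		VMSnapshotReattempts: []VMSnapshotReattemptsParameters{{
			Enabled:     ptr(true),
			Interval:    ptr("5m"),
			MaxAttempts: ptr(float64(5)),
		}},
	}
}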

-
type ReattemptsInitParameters struct {
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// — Retry interval. See interval_type for available values
-Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+ // — Retry interval. See interval_type for available values
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
-// — Maximum number of attempts before throwing an error
-MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+ // — Maximum number of attempts before throwing an error
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
}

-
type ReattemptsObservation struct {
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// — Retry interval. See interval_type for available values
-Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+ // — Retry interval. See interval_type for available values
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
-// — Maximum number of attempts before throwing an error
-MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+ // — Maximum number of attempts before throwing an error
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
}

-
type ReattemptsParameters struct {
+ // — Enable flag
+ // +kubebuilder:validation:Optional
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-// +kubebuilder:validation:Optional
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// — Retry interval. See interval_type for available values
-// +kubebuilder:validation:Optional
-Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+ // — Retry interval. See interval_type for available values
+ // +kubebuilder:validation:Optional
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
-// — Maximum number of attempts before throwing an error
-// +kubebuilder:validation:Optional
-MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+ // — Maximum number of attempts before throwing an error
+ // +kubebuilder:validation:Optional
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
}

-
type RetentionInitParameters struct {
+ // — Defines whether retention rule applies after creating backup or before.
+ AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
-// — Defines whether retention rule applies after creating backup or before.
-AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
-
-// — seconds
-Rules []RulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"`
+ // — seconds
+ Rules []RulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"`
}

-
type RetentionObservation struct {
+ // — Defines whether retention rule applies after creating backup or before.
+ AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
-// — Defines whether retention rule applies after creating backup or before.
-AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
-
-// — seconds
-Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"`
+ // — seconds
+ Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"`
}

-
type RetentionParameters struct {
+ // — Defines whether retention rule applies after creating backup or before.
+ // +kubebuilder:validation:Optional
+ AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
-// — Defines whether retention rule applies after creating backup or before.
-// +kubebuilder:validation:Optional
-AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
-
-// — seconds
-// +kubebuilder:validation:Optional
-Rules []RulesParameters `json:"rules,omitempty" tf:"rules,omitempty"`
+ // — seconds
+ // +kubebuilder:validation:Optional
+ Rules []RulesParameters `json:"rules,omitempty" tf:"rules,omitempty"`
}

-
type RulesInitParameters struct {
+ // (Conflicts with max_count) — Deletes backups that are older than max_age. Exactly one of max_count or max_age should be set.
+ MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
-// (Conflicts with max_count) — Deletes backups that older than max_age. Exactly one of max_count or max_age should be set.
-MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
+ // (Conflicts with max_age) — Deletes backups if its count exceeds max_count. Exactly one of max_count or max_age should be set.
+ MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
-// (Conflicts with max_age) — Deletes backups if it's count exceeds max_count. Exactly one of max_count or max_age should be set.
-MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
-
-// — days
-RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
+ // — days
+ RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
}

-
type RulesObservation struct {
+ // (Conflicts with max_count) — Deletes backups that are older than max_age. Exactly one of max_count or max_age should be set.
+ MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
-// (Conflicts with max_count) — Deletes backups that older than max_age. Exactly one of max_count or max_age should be set.
-MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
+ // (Conflicts with max_age) — Deletes backups if its count exceeds max_count. Exactly one of max_count or max_age should be set.
+ MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
-// (Conflicts with max_age) — Deletes backups if it's count exceeds max_count. Exactly one of max_count or max_age should be set.
-MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
-
-// — days
-RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
+ // — days
+ RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
}

-
type RulesParameters struct {
+ // (Conflicts with max_count) — Deletes backups that are older than max_age. Exactly one of max_count or max_age should be set.
+ // +kubebuilder:validation:Optional
+ MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
-// (Conflicts with max_count) — Deletes backups that older than max_age. Exactly one of max_count or max_age should be set.
-// +kubebuilder:validation:Optional
-MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
+ // (Conflicts with max_age) — Deletes backups if its count exceeds max_count. Exactly one of max_count or max_age should be set.
+ // +kubebuilder:validation:Optional
+ MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
-// (Conflicts with max_age) — Deletes backups if it's count exceeds max_count.
-// +kubebuilder:validation:Optional
-MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
-
-// — days
-// +kubebuilder:validation:Optional
-RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
+ // — days
+ // +kubebuilder:validation:Optional
+ RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
}

-
type SchedulingExecuteByTimeInitParameters struct {
+ // — If true, schedule will be applied on the last day of month. See day_type for available values.
+ IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-// — If true, schedule will be applied on the last day of month. See day_type for available values.
-IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
+ // — List of days when schedule applies. Used in "MONTHLY" type.
+ Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
-// — List of days when schedule applies. Used in "MONTHLY" type.
-Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
+ // — seconds
+ Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
-// — seconds
-Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
+ // hours format), when the schedule applies.
+ RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
-// hours format), when the schedule applies.
-RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+ // — Frequency of backup repetition. See interval_type for available values.
+ RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
-// — Frequency of backup repetition. See interval_type for available values.
-RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
-
-// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
-Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+ // — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+ Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
}

-
type SchedulingExecuteByTimeObservation struct {
+ // — If true, schedule will be applied on the last day of month. See day_type for available values.
+ IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-// — If true, schedule will be applied on the last day of month. See day_type for available values.
-IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-
-// — List of days when schedule applies. Used in "MONTHLY" type.
-Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
+ // — List of days when schedule applies. Used in "MONTHLY" type.
+ Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
-// — seconds
-Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
+ // — seconds
+ Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
-// hours format), when the schedule applies.
-RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+ // hours format), when the schedule applies.
+ RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
-// — Frequency of backup repetition. See interval_type for available values.
-RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+ // — Frequency of backup repetition. See interval_type for available values.
+ RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
-Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+ // — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+ Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
}

-
type SchedulingExecuteByTimeParameters struct {
+ // — If true, schedule will be applied on the last day of month. See day_type for available values.
+ // +kubebuilder:validation:Optional
+ IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-// — If true, schedule will be applied on the last day of month. See day_type for available values.
-// +kubebuilder:validation:Optional
-IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"`
-
-// — List of days when schedule applies. Used in "MONTHLY" type.
-// +kubebuilder:validation:Optional
-Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
+ // — List of days when schedule applies. Used in "MONTHLY" type.
+ // +kubebuilder:validation:Optional
+ Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"`
-// — seconds
-// +kubebuilder:validation:Optional
-Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
+ // — seconds
+ // +kubebuilder:validation:Optional
+ Months []*float64 `json:"months,omitempty" tf:"months,omitempty"`
-// hours format), when the schedule applies.
-// +kubebuilder:validation:Optional
-RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+ // hours format), when the schedule applies.
+ // +kubebuilder:validation:Optional
+ RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
-// — Frequency of backup repetition. See interval_type for available values.
-// +kubebuilder:validation:Optional
-RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+ // — Frequency of backup repetition. See interval_type for available values.
+ // +kubebuilder:validation:Optional
+ RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
-// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+ // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+ // +kubebuilder:validation:Optional
+ Type *string `json:"type" tf:"type,omitempty"`
-// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
-// +kubebuilder:validation:Optional
-Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+ // — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+ // +kubebuilder:validation:Optional
+ Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
}

-
type SchedulingInitParameters struct {
+ // A list of schedules with backup sets that compose the whole scheme.
+ BackupSets []BackupSetsInitParameters `json:"backupSets,omitempty" tf:"backup_sets,omitempty"`
-// A list of schedules with backup sets that compose the whole scheme.
-BackupSets []BackupSetsInitParameters `json:"backupSets,omitempty" tf:"backup_sets,omitempty"`
-
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly on of options should be set: execute_by_interval or execute_by_time.
-ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
+ // — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
-// — Perform backup periodically at specific time. Exactly on of options should be set: execute_by_interval or execute_by_time.
-ExecuteByTime []SchedulingExecuteByTimeInitParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
+ // — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByTime []SchedulingExecuteByTimeInitParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
-// — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
-MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
+ // — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
+ MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
-// — Configuration of the random delay between the execution of parallel tasks. See interval_type for available values.
-RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
+ // — Configuration of the random delay between the execution of parallel tasks. See interval_type for available values.
+ RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
-// — Scheme of the backups. Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", 'WEEKLY_INCREMENTAL".
-Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
+ // — Scheme of the backups. Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", "WEEKLY_INCREMENTAL".
+ Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
-// — A day of week to start weekly backups. See day_type for available values.
+ WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
}

-
type SchedulingObservation struct {
+ // A list of schedules with backup sets that compose the whole scheme.
+ BackupSets []BackupSetsObservation `json:"backupSets,omitempty" tf:"backup_sets,omitempty"`
-// A list of schedules with backup sets that compose the whole scheme.
-BackupSets []BackupSetsObservation `json:"backupSets,omitempty" tf:"backup_sets,omitempty"`
-
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly on of options should be set: execute_by_interval or execute_by_time.
-ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
+ // — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
-// — Perform backup periodically at specific time. Exactly on of options should be set: execute_by_interval or execute_by_time.
-ExecuteByTime []SchedulingExecuteByTimeObservation `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
+ // — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ ExecuteByTime []SchedulingExecuteByTimeObservation `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
-// — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
-MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
+ // — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
+ MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
-// — Configuration of the random delay between the execution of parallel tasks. See interval_type for available values.
-RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
+ // — Configuration of the random delay between the execution of parallel tasks. See interval_type for available values.
+ RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
-// — Scheme of the backups. Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", 'WEEKLY_INCREMENTAL".
-Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
+ // — Scheme of the backups. Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", "WEEKLY_INCREMENTAL".
+ Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
-// — A day of week to start weekly backups. See day_type for available values.
-WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
+ // — A day of week to start weekly backups. See day_type for available values.
+ WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
}

-
type SchedulingParameters struct {
+ // A list of schedules with backup sets that compose the whole scheme.
+ // +kubebuilder:validation:Optional
+ BackupSets []BackupSetsParameters `json:"backupSets,omitempty" tf:"backup_sets,omitempty"`
-// A list of schedules with backup sets that compose the whole scheme.
-// +kubebuilder:validation:Optional
-BackupSets []BackupSetsParameters `json:"backupSets,omitempty" tf:"backup_sets,omitempty"`
-
-// — Enable flag
-// +kubebuilder:validation:Optional
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // — Enable flag
+ // +kubebuilder:validation:Optional
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly on of options should be set: execute_by_interval or execute_by_time.
-// +kubebuilder:validation:Optional
-ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
+ // — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ // +kubebuilder:validation:Optional
+ ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
-// — Perform backup periodically at specific time. Exactly on of options should be set: execute_by_interval or execute_by_time.
-// +kubebuilder:validation:Optional
-ExecuteByTime []SchedulingExecuteByTimeParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
+ // — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+ // +kubebuilder:validation:Optional
+ ExecuteByTime []SchedulingExecuteByTimeParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
-// — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
-// +kubebuilder:validation:Optional
-MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
+ // — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
+ // +kubebuilder:validation:Optional
+ MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
-// — Configuration of the random delay between the execution of parallel tasks. See interval_type for available values.
-// +kubebuilder:validation:Optional
-RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
+ // — Configuration of the random delay between the execution of parallel tasks. See interval_type for available values.
+ // +kubebuilder:validation:Optional
+ RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
-// — Scheme of the backups. Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", 'WEEKLY_INCREMENTAL".
-// +kubebuilder:validation:Optional
-Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
+ // — Scheme of the backups. Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", "WEEKLY_INCREMENTAL".
+ // +kubebuilder:validation:Optional
+ Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
-// — A day of week to start weekly backups. See day_type for available values.
-// +kubebuilder:validation:Optional
-WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
+ // — A day of week to start weekly backups. See day_type for available values.
+ // +kubebuilder:validation:Optional
+ WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
}

-
type VMSnapshotReattemptsInitParameters struct {
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// — Retry interval. See interval_type for available values
-Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+ // — Retry interval. See interval_type for available values
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
-// — Maximum number of attempts before throwing an error
-MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+ // — Maximum number of attempts before throwing an error
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
}

-
type VMSnapshotReattemptsObservation struct {
+ // — Enable flag
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// — Retry interval. See interval_type for available values
-Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+ // — Retry interval. See interval_type for available values
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
-// — Maximum number of attempts before throwing an error
-MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+ // — Maximum number of attempts before throwing an error
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
}

-
type VMSnapshotReattemptsParameters struct {
+ // — Enable flag
+ // +kubebuilder:validation:Optional
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// — Enable flag
-// +kubebuilder:validation:Optional
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// — Retry interval. See interval_type for available values
-// +kubebuilder:validation:Optional
-Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+ // — Retry interval. See interval_type for available values
+ // +kubebuilder:validation:Optional
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
-// — Maximum number of attempts before throwing an error
-// +kubebuilder:validation:Optional
-MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+ // — Maximum number of attempts before throwing an error
+ // +kubebuilder:validation:Optional
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
}

// PolicySpec defines the desired state of Policy
type PolicySpec struct {
v1.ResourceSpec `json:",inline"`
- ForProvider PolicyParameters `json:"forProvider"`
+ ForProvider PolicyParameters `json:"forProvider"`
// THIS IS A BETA FIELD. It will be honored
// unless the Management Policies feature flag is disabled.
// InitProvider holds the same fields as ForProvider, with the exception
@@ -731,20 +672,19 @@ type PolicySpec struct {
// required on creation, but we do not desire to update them after creation,
// for example because of an external controller is managing them, like an
// autoscaler.
- InitProvider PolicyInitParameters `json:"initProvider,omitempty"`
+ InitProvider PolicyInitParameters `json:"initProvider,omitempty"`
}

// PolicyStatus defines the observed state of Policy.
type PolicyStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider PolicyObservation `json:"atProvider,omitempty"` + AtProvider PolicyObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Policy is the Schema for the Policys API. Allows management of Yandex.Cloud Backup Policy. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -754,13 +694,13 @@ type PolicyStatus struct { type Policy struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.reattempts) || (has(self.initProvider) && has(self.initProvider.reattempts))",message="spec.forProvider.reattempts is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retention) || (has(self.initProvider) && has(self.initProvider.retention))",message="spec.forProvider.retention is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scheduling) || (has(self.initProvider) && has(self.initProvider.scheduling))",message="spec.forProvider.scheduling is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmSnapshotReattempts) || (has(self.initProvider) && has(self.initProvider.vmSnapshotReattempts))",message="spec.forProvider.vmSnapshotReattempts is a required parameter" - Spec PolicySpec `json:"spec"` - Status PolicyStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.reattempts) || (has(self.initProvider) && has(self.initProvider.reattempts))",message="spec.forProvider.reattempts is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retention) || (has(self.initProvider) && has(self.initProvider.retention))",message="spec.forProvider.retention is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.scheduling) || (has(self.initProvider) && has(self.initProvider.scheduling))",message="spec.forProvider.scheduling is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmSnapshotReattempts) || (has(self.initProvider) && has(self.initProvider.vmSnapshotReattempts))",message="spec.forProvider.vmSnapshotReattempts is a required parameter" + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/backup/v1alpha1/zz_policybindings_terraformed.go b/apis/backup/v1alpha1/zz_policybindings_terraformed.go index 9946821..5286b2f 100755 --- a/apis/backup/v1alpha1/zz_policybindings_terraformed.go +++ b/apis/backup/v1alpha1/zz_policybindings_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this PolicyBindings func (mg *PolicyBindings) GetTerraformResourceType() string { - return "yandex_backup_policy_bindings" + return "yandex_backup_policy_bindings" } // GetConnectionDetailsMapping for this PolicyBindings func (tr *PolicyBindings) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this PolicyBindings func (tr *PolicyBindings) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this PolicyBindings func (tr *PolicyBindings) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this PolicyBindings func (tr *PolicyBindings) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this PolicyBindings func (tr *PolicyBindings) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this PolicyBindings func (tr *PolicyBindings) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this PolicyBindings func (tr 
*PolicyBindings) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this PolicyBindings func (tr *PolicyBindings) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this PolicyBindings using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *PolicyBindings) LateInitialize(attrs []byte) (bool, error) { - params := &PolicyBindingsParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &PolicyBindingsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
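+ // The late-initializer fills only those spec.forProvider fields that are still nil, copying values from the observed Terraform state; the zero-value filter registered above keeps zero values out of omitempty-tagged fields, and the boolean result reports whether the spec changed and should be persisted.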
+ return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *PolicyBindings) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/backup/v1alpha1/zz_policybindings_types.go b/apis/backup/v1alpha1/zz_policybindings_types.go index 18aab34..61858e0 100755 --- a/apis/backup/v1alpha1/zz_policybindings_types.go +++ b/apis/backup/v1alpha1/zz_policybindings_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,81 +7,72 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type PolicyBindingsInitParameters struct { + // — Compute Cloud instance ID. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` -// — Compute Cloud instance ID. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1.Instance -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + // Reference to an Instance in compute to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` -// Reference to a Instance in compute to populate instanceId. -// +kubebuilder:validation:Optional -InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + // Selector for an Instance in compute to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` -// Selector for a Instance in compute to populate instanceId. -// +kubebuilder:validation:Optional -InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` - -// — Backup Policy ID. -PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + // — Backup Policy ID. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` } - type PolicyBindingsObservation struct { + // Creation timestamp of the Yandex Cloud Policy Bindings. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Creation timestamp of the Yandex Cloud Policy Bindings. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` - -// Boolean flag that specifies whether the policy application is enabled. May be false if processing flag is true. -Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + // Boolean flag that specifies whether the policy application is enabled. May be false if processing flag is true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// — Compute Cloud instance ID. -InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + // — Compute Cloud instance ID. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` -// — Backup Policy ID.
+ PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` -// Boolean flag that specifies whether the policy is in the process of binding to an instance. -Processing *bool `json:"processing,omitempty" tf:"processing,omitempty"` + // Boolean flag that specifies whether the policy is in the process of binding to an instance. + Processing *bool `json:"processing,omitempty" tf:"processing,omitempty"` } - type PolicyBindingsParameters struct { + // — Compute Cloud instance ID. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` -// — Compute Cloud instance ID. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1.Instance -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` - -// Reference to a Instance in compute to populate instanceId. -// +kubebuilder:validation:Optional -InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + // Reference to an Instance in compute to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` -// Selector for a Instance in compute to populate instanceId. -// +kubebuilder:validation:Optional -InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + // Selector for an Instance in compute to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` -// — Backup Policy ID. -// +kubebuilder:validation:Optional -PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + // — Backup Policy ID. + // +kubebuilder:validation:Optional + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` } // PolicyBindingsSpec defines the desired state of PolicyBindings type PolicyBindingsSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider PolicyBindingsParameters `json:"forProvider"` + ForProvider PolicyBindingsParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -94,20 +83,19 @@ type PolicyBindingsSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider PolicyBindingsInitParameters `json:"initProvider,omitempty"` + InitProvider PolicyBindingsInitParameters `json:"initProvider,omitempty"` } // PolicyBindingsStatus defines the observed state of PolicyBindings. type PolicyBindingsStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider PolicyBindingsObservation `json:"atProvider,omitempty"` + AtProvider PolicyBindingsObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // PolicyBindings is the Schema for the PolicyBindingss API. Allows to bind compute instance with backup policy.
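// The printcolumn markers below surface the Synced and Ready conditions and the crossplane.io/external-name annotation as kubectl get columns, while the CEL XValidation rule on the struct requires spec.forProvider.policyId (or its spec.initProvider counterpart) whenever the Create or Update management policies are in effect.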
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -117,9 +105,9 @@ type PolicyBindingsStatus struct { type PolicyBindings struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyId) || (has(self.initProvider) && has(self.initProvider.policyId))",message="spec.forProvider.policyId is a required parameter" - Spec PolicyBindingsSpec `json:"spec"` - Status PolicyBindingsStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyId) || (has(self.initProvider) && has(self.initProvider.policyId))",message="spec.forProvider.policyId is a required parameter" + Spec PolicyBindingsSpec `json:"spec"` + Status PolicyBindingsStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/billing/v1alpha1/zz_cloudbinding_terraformed.go b/apis/billing/v1alpha1/zz_cloudbinding_terraformed.go new file mode 100755 index 0000000..cf80ab5 --- /dev/null +++ b/apis/billing/v1alpha1/zz_cloudbinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CloudBinding +func (mg *CloudBinding) GetTerraformResourceType() string { + return "yandex_billing_cloud_binding" +} + +// GetConnectionDetailsMapping for this CloudBinding +func (tr *CloudBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CloudBinding +func (tr *CloudBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CloudBinding +func (tr *CloudBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CloudBinding +func (tr *CloudBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CloudBinding +func (tr *CloudBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CloudBinding +func (tr *CloudBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CloudBinding +func (tr *CloudBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this CloudBinding +func (tr *CloudBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CloudBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CloudBinding) LateInitialize(attrs []byte) (bool, error) { + params := &CloudBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CloudBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/billing/v1alpha1/zz_cloudbinding_types.go b/apis/billing/v1alpha1/zz_cloudbinding_types.go new file mode 100755 index 0000000..e8b60e6 --- /dev/null +++ b/apis/billing/v1alpha1/zz_cloudbinding_types.go @@ -0,0 +1,104 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudBindingInitParameters struct { + + // ID of billing account to bind cloud to. + BillingAccountID *string `json:"billingAccountId,omitempty" tf:"billing_account_id,omitempty"` + + // ID of cloud to bind. + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` +} + +type CloudBindingObservation struct { + + // ID of billing account to bind cloud to. + BillingAccountID *string `json:"billingAccountId,omitempty" tf:"billing_account_id,omitempty"` + + // ID of cloud to bind. + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type CloudBindingParameters struct { + + // ID of billing account to bind cloud to. + // +kubebuilder:validation:Optional + BillingAccountID *string `json:"billingAccountId,omitempty" tf:"billing_account_id,omitempty"` + + // ID of cloud to bind.
+ // +kubebuilder:validation:Optional + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` +} + +// CloudBindingSpec defines the desired state of CloudBinding +type CloudBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CloudBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider CloudBindingInitParameters `json:"initProvider,omitempty"` +} + +// CloudBindingStatus defines the observed state of CloudBinding. +type CloudBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CloudBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// CloudBinding is the Schema for the CloudBindings API. Binds a cloud to a billing account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type CloudBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.billingAccountId) || (has(self.initProvider) && has(self.initProvider.billingAccountId))",message="spec.forProvider.billingAccountId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.cloudId) || (has(self.initProvider) && has(self.initProvider.cloudId))",message="spec.forProvider.cloudId is a required parameter" + Spec CloudBindingSpec `json:"spec"` + Status CloudBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CloudBindingList contains a list of CloudBindings +type CloudBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CloudBinding `json:"items"` +} + +// Repository type metadata. +var ( + CloudBinding_Kind = "CloudBinding" + CloudBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CloudBinding_Kind}.String() + CloudBinding_KindAPIVersion = CloudBinding_Kind + "."
+ CRDGroupVersion.String() + CloudBinding_GroupVersionKind = CRDGroupVersion.WithKind(CloudBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&CloudBinding{}, &CloudBindingList{}) +} diff --git a/apis/billing/v1alpha1/zz_generated.conversion_hubs.go b/apis/billing/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..a2f6b4c --- /dev/null +++ b/apis/billing/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *CloudBinding) Hub() {} diff --git a/apis/billing/v1alpha1/zz_generated.deepcopy.go b/apis/billing/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..073c2ea --- /dev/null +++ b/apis/billing/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,183 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBinding) DeepCopyInto(out *CloudBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBinding. +func (in *CloudBinding) DeepCopy() *CloudBinding { + if in == nil { + return nil + } + out := new(CloudBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingInitParameters) DeepCopyInto(out *CloudBindingInitParameters) { + *out = *in + if in.BillingAccountID != nil { + in, out := &in.BillingAccountID, &out.BillingAccountID + *out = new(string) + **out = **in + } + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingInitParameters. +func (in *CloudBindingInitParameters) DeepCopy() *CloudBindingInitParameters { + if in == nil { + return nil + } + out := new(CloudBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingList) DeepCopyInto(out *CloudBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingList. +func (in *CloudBindingList) DeepCopy() *CloudBindingList { + if in == nil { + return nil + } + out := new(CloudBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CloudBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingObservation) DeepCopyInto(out *CloudBindingObservation) { + *out = *in + if in.BillingAccountID != nil { + in, out := &in.BillingAccountID, &out.BillingAccountID + *out = new(string) + **out = **in + } + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingObservation. +func (in *CloudBindingObservation) DeepCopy() *CloudBindingObservation { + if in == nil { + return nil + } + out := new(CloudBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingParameters) DeepCopyInto(out *CloudBindingParameters) { + *out = *in + if in.BillingAccountID != nil { + in, out := &in.BillingAccountID, &out.BillingAccountID + *out = new(string) + **out = **in + } + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingParameters. +func (in *CloudBindingParameters) DeepCopy() *CloudBindingParameters { + if in == nil { + return nil + } + out := new(CloudBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingSpec) DeepCopyInto(out *CloudBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingSpec. +func (in *CloudBindingSpec) DeepCopy() *CloudBindingSpec { + if in == nil { + return nil + } + out := new(CloudBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingStatus) DeepCopyInto(out *CloudBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingStatus. +func (in *CloudBindingStatus) DeepCopy() *CloudBindingStatus { + if in == nil { + return nil + } + out := new(CloudBindingStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/billing/v1alpha1/zz_groupversion_info.go b/apis/billing/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..9f2c087 --- /dev/null +++ b/apis/billing/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=billing.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
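+// CRDGroup and CRDVersion identify the new billing group (billing.yandex-cloud.upjet.crossplane.io/v1alpha1); consumers register its types by passing AddToScheme, built from the SchemeBuilder below, to their runtime scheme.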
+const ( + CRDGroup = "billing.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go b/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go index c82a7a2..8304d38 100755 --- a/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go @@ -1,13 +1,9 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 +// Hub marks this type as a conversion hub. +func (tr *OriginGroup) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *OriginGroup) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *Resource) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *Resource) Hub() {} diff --git a/apis/cdn/v1alpha1/zz_generated.deepcopy.go b/apis/cdn/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..898fe65 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1464 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressACLInitParameters) DeepCopyInto(out *IPAddressACLInitParameters) { + *out = *in + if in.ExceptedValues != nil { + in, out := &in.ExceptedValues, &out.ExceptedValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressACLInitParameters. +func (in *IPAddressACLInitParameters) DeepCopy() *IPAddressACLInitParameters { + if in == nil { + return nil + } + out := new(IPAddressACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressACLObservation) DeepCopyInto(out *IPAddressACLObservation) { + *out = *in + if in.ExceptedValues != nil { + in, out := &in.ExceptedValues, &out.ExceptedValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressACLObservation. +func (in *IPAddressACLObservation) DeepCopy() *IPAddressACLObservation { + if in == nil { + return nil + } + out := new(IPAddressACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
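+// Every pointer field is re-allocated and every slice or map is rebuilt, so the copy shares no mutable memory with the receiver; controller-runtime depends on this to hand out cache-safe copies.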
+func (in *IPAddressACLParameters) DeepCopyInto(out *IPAddressACLParameters) { + *out = *in + if in.ExceptedValues != nil { + in, out := &in.ExceptedValues, &out.ExceptedValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressACLParameters. +func (in *IPAddressACLParameters) DeepCopy() *IPAddressACLParameters { + if in == nil { + return nil + } + out := new(IPAddressACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsInitParameters) DeepCopyInto(out *OptionsInitParameters) { + *out = *in + if in.AllowedHTTPMethods != nil { + in, out := &in.AllowedHTTPMethods, &out.AllowedHTTPMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BrowserCacheSettings != nil { + in, out := &in.BrowserCacheSettings, &out.BrowserCacheSettings + *out = new(float64) + **out = **in + } + if in.CacheHTTPHeaders != nil { + in, out := &in.CacheHTTPHeaders, &out.CacheHTTPHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomHostHeader != nil { + in, out := &in.CustomHostHeader, &out.CustomHostHeader + *out = new(string) + **out = **in + } + if in.CustomServerName != nil { + in, out := &in.CustomServerName, &out.CustomServerName + *out = new(string) + **out = **in + } + if in.DisableCache != nil { + in, out := &in.DisableCache, &out.DisableCache + *out = new(bool) + **out = **in + } + if in.DisableProxyForceRanges != nil { + in, out := &in.DisableProxyForceRanges, &out.DisableProxyForceRanges + *out = new(bool) + **out = **in + } + if in.EdgeCacheSettings != nil { + in, out := &in.EdgeCacheSettings, &out.EdgeCacheSettings + *out = new(float64) + **out = **in + } + if in.EnableIPURLSigning != nil { + in, out := &in.EnableIPURLSigning, &out.EnableIPURLSigning + *out = new(bool) + **out = **in + } + if in.FetchedCompressed != nil { + in, out := &in.FetchedCompressed, &out.FetchedCompressed + *out = new(bool) + **out = **in + } + if in.ForwardHostHeader != nil { + in, out := &in.ForwardHostHeader, &out.ForwardHostHeader + *out = new(bool) + **out = **in + } + if in.GzipOn != nil { + in, out := &in.GzipOn, &out.GzipOn + *out = new(bool) + **out = **in + } + if in.IPAddressACL != nil { + in, out := &in.IPAddressACL, &out.IPAddressACL + *out = make([]IPAddressACLInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IgnoreCookie != nil { + in, out := &in.IgnoreCookie, &out.IgnoreCookie + *out = new(bool) + **out = **in + } + if in.IgnoreQueryParams != nil { + in, out := &in.IgnoreQueryParams, &out.IgnoreQueryParams + *out = new(bool) + **out = **in + } + if in.ProxyCacheMethodsSet != nil { + in, out := &in.ProxyCacheMethodsSet, &out.ProxyCacheMethodsSet 
+ *out = new(bool) + **out = **in + } + if in.QueryParamsBlacklist != nil { + in, out := &in.QueryParamsBlacklist, &out.QueryParamsBlacklist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryParamsWhitelist != nil { + in, out := &in.QueryParamsWhitelist, &out.QueryParamsWhitelist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectHTTPSToHTTP != nil { + in, out := &in.RedirectHTTPSToHTTP, &out.RedirectHTTPSToHTTP + *out = new(bool) + **out = **in + } + if in.RedirectHTTPToHTTPS != nil { + in, out := &in.RedirectHTTPToHTTPS, &out.RedirectHTTPToHTTPS + *out = new(bool) + **out = **in + } + if in.SecureKey != nil { + in, out := &in.SecureKey, &out.SecureKey + *out = new(string) + **out = **in + } + if in.Slice != nil { + in, out := &in.Slice, &out.Slice + *out = new(bool) + **out = **in + } + if in.StaticRequestHeaders != nil { + in, out := &in.StaticRequestHeaders, &out.StaticRequestHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StaticResponseHeaders != nil { + in, out := &in.StaticResponseHeaders, &out.StaticResponseHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsInitParameters. +func (in *OptionsInitParameters) DeepCopy() *OptionsInitParameters { + if in == nil { + return nil + } + out := new(OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionsObservation) DeepCopyInto(out *OptionsObservation) { + *out = *in + if in.AllowedHTTPMethods != nil { + in, out := &in.AllowedHTTPMethods, &out.AllowedHTTPMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BrowserCacheSettings != nil { + in, out := &in.BrowserCacheSettings, &out.BrowserCacheSettings + *out = new(float64) + **out = **in + } + if in.CacheHTTPHeaders != nil { + in, out := &in.CacheHTTPHeaders, &out.CacheHTTPHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomHostHeader != nil { + in, out := &in.CustomHostHeader, &out.CustomHostHeader + *out = new(string) + **out = **in + } + if in.CustomServerName != nil { + in, out := &in.CustomServerName, &out.CustomServerName + *out = new(string) + **out = **in + } + if in.DisableCache != nil { + in, out := &in.DisableCache, &out.DisableCache + *out = new(bool) + **out = **in + } + if in.DisableProxyForceRanges != nil { + in, out := &in.DisableProxyForceRanges, &out.DisableProxyForceRanges + *out = new(bool) + **out = **in + } + if in.EdgeCacheSettings != nil { + in, out := &in.EdgeCacheSettings, &out.EdgeCacheSettings + *out = new(float64) + **out = **in + } + if in.EnableIPURLSigning != nil { + in, out := &in.EnableIPURLSigning, &out.EnableIPURLSigning + *out = new(bool) + **out = **in + } + if in.FetchedCompressed != nil { + in, out := &in.FetchedCompressed, &out.FetchedCompressed + *out = new(bool) + **out = **in + } + if in.ForwardHostHeader != nil { + in, out := &in.ForwardHostHeader, &out.ForwardHostHeader + *out = new(bool) + **out = **in + } + if in.GzipOn != nil { + in, out := &in.GzipOn, &out.GzipOn + *out = new(bool) + **out = **in + } + if in.IPAddressACL != nil { + in, out := &in.IPAddressACL, &out.IPAddressACL + *out = make([]IPAddressACLObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IgnoreCookie != nil { + in, out := &in.IgnoreCookie, &out.IgnoreCookie + *out = new(bool) + **out = **in + } + if in.IgnoreQueryParams != nil { + in, out := &in.IgnoreQueryParams, &out.IgnoreQueryParams + *out = new(bool) + **out = **in + } + if in.ProxyCacheMethodsSet != nil { + in, out := &in.ProxyCacheMethodsSet, &out.ProxyCacheMethodsSet + *out = new(bool) + **out = **in + } + if in.QueryParamsBlacklist != nil { + in, out := &in.QueryParamsBlacklist, &out.QueryParamsBlacklist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryParamsWhitelist != nil { + in, out := &in.QueryParamsWhitelist, &out.QueryParamsWhitelist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectHTTPSToHTTP != nil { + in, out := &in.RedirectHTTPSToHTTP, &out.RedirectHTTPSToHTTP + *out = new(bool) + **out = **in + } + if in.RedirectHTTPToHTTPS != nil { + in, out := &in.RedirectHTTPToHTTPS, &out.RedirectHTTPToHTTPS + *out = new(bool) + **out = **in + } + if in.SecureKey != nil { + in, 
out := &in.SecureKey, &out.SecureKey + *out = new(string) + **out = **in + } + if in.Slice != nil { + in, out := &in.Slice, &out.Slice + *out = new(bool) + **out = **in + } + if in.StaticRequestHeaders != nil { + in, out := &in.StaticRequestHeaders, &out.StaticRequestHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StaticResponseHeaders != nil { + in, out := &in.StaticResponseHeaders, &out.StaticResponseHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsObservation. +func (in *OptionsObservation) DeepCopy() *OptionsObservation { + if in == nil { + return nil + } + out := new(OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsParameters) DeepCopyInto(out *OptionsParameters) { + *out = *in + if in.AllowedHTTPMethods != nil { + in, out := &in.AllowedHTTPMethods, &out.AllowedHTTPMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BrowserCacheSettings != nil { + in, out := &in.BrowserCacheSettings, &out.BrowserCacheSettings + *out = new(float64) + **out = **in + } + if in.CacheHTTPHeaders != nil { + in, out := &in.CacheHTTPHeaders, &out.CacheHTTPHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomHostHeader != nil { + in, out := &in.CustomHostHeader, &out.CustomHostHeader + *out = new(string) + **out = **in + } + if in.CustomServerName != nil { + in, out := &in.CustomServerName, &out.CustomServerName + *out = new(string) + **out = **in + } + if in.DisableCache != nil { + in, out := &in.DisableCache, &out.DisableCache + *out = new(bool) + **out = **in + } + if in.DisableProxyForceRanges != nil { + in, out := &in.DisableProxyForceRanges, &out.DisableProxyForceRanges + *out = new(bool) + **out = **in + } + if in.EdgeCacheSettings != nil { + in, out := &in.EdgeCacheSettings, &out.EdgeCacheSettings + *out = new(float64) + **out = **in + } + if in.EnableIPURLSigning != nil { + in, out := &in.EnableIPURLSigning, &out.EnableIPURLSigning + *out = new(bool) + **out = **in + } + if in.FetchedCompressed != nil { + in, out := &in.FetchedCompressed, &out.FetchedCompressed + *out = new(bool) + **out = **in + } + if in.ForwardHostHeader != nil { + in, out := &in.ForwardHostHeader, &out.ForwardHostHeader + *out = new(bool) + **out = **in + } + if in.GzipOn != nil { + in, out := &in.GzipOn, &out.GzipOn + *out = new(bool) + **out = **in + } + if in.IPAddressACL != nil { + in, out := &in.IPAddressACL, &out.IPAddressACL + *out = 
make([]IPAddressACLParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IgnoreCookie != nil { + in, out := &in.IgnoreCookie, &out.IgnoreCookie + *out = new(bool) + **out = **in + } + if in.IgnoreQueryParams != nil { + in, out := &in.IgnoreQueryParams, &out.IgnoreQueryParams + *out = new(bool) + **out = **in + } + if in.ProxyCacheMethodsSet != nil { + in, out := &in.ProxyCacheMethodsSet, &out.ProxyCacheMethodsSet + *out = new(bool) + **out = **in + } + if in.QueryParamsBlacklist != nil { + in, out := &in.QueryParamsBlacklist, &out.QueryParamsBlacklist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryParamsWhitelist != nil { + in, out := &in.QueryParamsWhitelist, &out.QueryParamsWhitelist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectHTTPSToHTTP != nil { + in, out := &in.RedirectHTTPSToHTTP, &out.RedirectHTTPSToHTTP + *out = new(bool) + **out = **in + } + if in.RedirectHTTPToHTTPS != nil { + in, out := &in.RedirectHTTPToHTTPS, &out.RedirectHTTPToHTTPS + *out = new(bool) + **out = **in + } + if in.SecureKey != nil { + in, out := &in.SecureKey, &out.SecureKey + *out = new(string) + **out = **in + } + if in.Slice != nil { + in, out := &in.Slice, &out.Slice + *out = new(bool) + **out = **in + } + if in.StaticRequestHeaders != nil { + in, out := &in.StaticRequestHeaders, &out.StaticRequestHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StaticResponseHeaders != nil { + in, out := &in.StaticResponseHeaders, &out.StaticResponseHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsParameters. +func (in *OptionsParameters) DeepCopy() *OptionsParameters { + if in == nil { + return nil + } + out := new(OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroup) DeepCopyInto(out *OriginGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroup. +func (in *OriginGroup) DeepCopy() *OriginGroup { + if in == nil { + return nil + } + out := new(OriginGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OriginGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupInitParameters) DeepCopyInto(out *OriginGroupInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UseNext != nil { + in, out := &in.UseNext, &out.UseNext + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupInitParameters. +func (in *OriginGroupInitParameters) DeepCopy() *OriginGroupInitParameters { + if in == nil { + return nil + } + out := new(OriginGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupList) DeepCopyInto(out *OriginGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OriginGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupList. +func (in *OriginGroupList) DeepCopy() *OriginGroupList { + if in == nil { + return nil + } + out := new(OriginGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OriginGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupObservation) DeepCopyInto(out *OriginGroupObservation) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UseNext != nil { + in, out := &in.UseNext, &out.UseNext + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupObservation. +func (in *OriginGroupObservation) DeepCopy() *OriginGroupObservation { + if in == nil { + return nil + } + out := new(OriginGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupParameters) DeepCopyInto(out *OriginGroupParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UseNext != nil { + in, out := &in.UseNext, &out.UseNext + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupParameters. +func (in *OriginGroupParameters) DeepCopy() *OriginGroupParameters { + if in == nil { + return nil + } + out := new(OriginGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupSpec) DeepCopyInto(out *OriginGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupSpec. +func (in *OriginGroupSpec) DeepCopy() *OriginGroupSpec { + if in == nil { + return nil + } + out := new(OriginGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupStatus) DeepCopyInto(out *OriginGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupStatus. +func (in *OriginGroupStatus) DeepCopy() *OriginGroupStatus { + if in == nil { + return nil + } + out := new(OriginGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginInitParameters) DeepCopyInto(out *OriginInitParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginInitParameters. +func (in *OriginInitParameters) DeepCopy() *OriginInitParameters { + if in == nil { + return nil + } + out := new(OriginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginObservation) DeepCopyInto(out *OriginObservation) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginObservation. +func (in *OriginObservation) DeepCopy() *OriginObservation { + if in == nil { + return nil + } + out := new(OriginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginParameters) DeepCopyInto(out *OriginParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginParameters. +func (in *OriginParameters) DeepCopy() *OriginParameters { + if in == nil { + return nil + } + out := new(OriginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resource) DeepCopyInto(out *Resource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource. +func (in *Resource) DeepCopy() *Resource { + if in == nil { + return nil + } + out := new(Resource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Resource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
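--- editor's note (illustrative sketch; not part of the generated diff) ---
DeepCopyObject, generated above for Resource (and earlier for OriginGroup), is what satisfies runtime.Object, so controllers can copy objects handed out by shared informer caches before mutating them. A hypothetical usage, assuming this package's Resource type and the usual apimachinery imports:

	// annotate returns a mutated copy; the (possibly cache-shared) input stays untouched.
	func annotate(in *Resource) *Resource {
		out := in.DeepCopyObject().(*Resource) // round-trips through runtime.Object back to the concrete type
		out.SetAnnotations(map[string]string{"example.org/touched": "true"}) // replaces the annotation map on the copy only
		return out
	}
--- end note ---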
+func (in *ResourceInitParameters) DeepCopyInto(out *ResourceInitParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.Cname != nil { + in, out := &in.Cname, &out.Cname + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.OriginGroupIDRef != nil { + in, out := &in.OriginGroupIDRef, &out.OriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupIDSelector != nil { + in, out := &in.OriginGroupIDSelector, &out.OriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupName != nil { + in, out := &in.OriginGroupName, &out.OriginGroupName + *out = new(string) + **out = **in + } + if in.OriginProtocol != nil { + in, out := &in.OriginProtocol, &out.OriginProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryHostnames != nil { + in, out := &in.SecondaryHostnames, &out.SecondaryHostnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInitParameters. +func (in *ResourceInitParameters) DeepCopy() *ResourceInitParameters { + if in == nil { + return nil + } + out := new(ResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceList) DeepCopyInto(out *ResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Resource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in *ResourceList) DeepCopy() *ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceObservation) DeepCopyInto(out *ResourceObservation) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.Cname != nil { + in, out := &in.Cname, &out.Cname + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.OriginGroupName != nil { + in, out := &in.OriginGroupName, &out.OriginGroupName + *out = new(string) + **out = **in + } + if in.OriginProtocol != nil { + in, out := &in.OriginProtocol, &out.OriginProtocol + *out = new(string) + **out = **in + } + if in.ProviderCname != nil { + in, out := &in.ProviderCname, &out.ProviderCname + *out = new(string) + **out = **in + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryHostnames != nil { + in, out := &in.SecondaryHostnames, &out.SecondaryHostnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceObservation. +func (in *ResourceObservation) DeepCopy() *ResourceObservation { + if in == nil { + return nil + } + out := new(ResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceParameters) DeepCopyInto(out *ResourceParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.Cname != nil { + in, out := &in.Cname, &out.Cname + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.OriginGroupIDRef != nil { + in, out := &in.OriginGroupIDRef, &out.OriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupIDSelector != nil { + in, out := &in.OriginGroupIDSelector, &out.OriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupName != nil { + in, out := &in.OriginGroupName, &out.OriginGroupName + *out = new(string) + **out = **in + } + if in.OriginProtocol != nil { + in, out := &in.OriginProtocol, &out.OriginProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryHostnames != nil { + in, out := &in.SecondaryHostnames, &out.SecondaryHostnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceParameters. +func (in *ResourceParameters) DeepCopy() *ResourceParameters { + if in == nil { + return nil + } + out := new(ResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. 
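--- editor's note (not part of the generated diff) ---
For orientation: ResourceSpec, copied above, layers v1.ResourceSpec (management policies, provider config references, and so on) with ForProvider (desired state) and InitProvider (fields honored on creation but, per the upjet comments elsewhere in this diff, not enforced on update), while ResourceStatus pairs v1.ResourceStatus with AtProvider, the last observed Terraform state. The deepcopy plumbing exists so each of these layers can be copied as a unit.
--- end note ---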
+func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLCertificateInitParameters) DeepCopyInto(out *SSLCertificateInitParameters) { + *out = *in + if in.CertificateManagerID != nil { + in, out := &in.CertificateManagerID, &out.CertificateManagerID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateInitParameters. +func (in *SSLCertificateInitParameters) DeepCopy() *SSLCertificateInitParameters { + if in == nil { + return nil + } + out := new(SSLCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLCertificateObservation) DeepCopyInto(out *SSLCertificateObservation) { + *out = *in + if in.CertificateManagerID != nil { + in, out := &in.CertificateManagerID, &out.CertificateManagerID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateObservation. +func (in *SSLCertificateObservation) DeepCopy() *SSLCertificateObservation { + if in == nil { + return nil + } + out := new(SSLCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLCertificateParameters) DeepCopyInto(out *SSLCertificateParameters) { + *out = *in + if in.CertificateManagerID != nil { + in, out := &in.CertificateManagerID, &out.CertificateManagerID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateParameters. +func (in *SSLCertificateParameters) DeepCopy() *SSLCertificateParameters { + if in == nil { + return nil + } + out := new(SSLCertificateParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cdn/v1alpha1/zz_generated.resolvers.go b/apis/cdn/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..45e0381 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,128 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this OriginGroup. 
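--- editor's note (illustrative sketch; not part of the generated diff) ---
The resolver below is generated by angryjet from the +crossplane:generate:reference markers seen earlier: it lets a manifest point folderId at a resourcemanager Folder object instead of hard-coding the ID. In Go terms, a hypothetical caller could wire either form (values are illustrative):

	og := &OriginGroup{}
	// reference a Folder object by name...
	og.Spec.ForProvider.FolderIDRef = &v1.Reference{Name: "team-folder"}
	// ...or select one by labels:
	og.Spec.ForProvider.FolderIDSelector = &v1.Selector{
		MatchLabels: map[string]string{"team": "cdn"},
	}
	// During reconciliation, ResolveReferences fills Spec.ForProvider.FolderID
	// from the referenced Folder's external name and records the resolved reference.
--- end note ---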
+func (mg *OriginGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Resource. +func (mg *Resource) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromFloatPtrValue(mg.Spec.ForProvider.OriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.OriginGroupIDRef, + Selector: mg.Spec.ForProvider.OriginGroupIDSelector, + To: reference.To{ + List: &OriginGroupList{}, + Managed: &OriginGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OriginGroupID") + } + mg.Spec.ForProvider.OriginGroupID = reference.ToFloatPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OriginGroupIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromFloatPtrValue(mg.Spec.InitProvider.OriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.OriginGroupIDRef, + Selector: 
mg.Spec.InitProvider.OriginGroupIDSelector, + To: reference.To{ + List: &OriginGroupList{}, + Managed: &OriginGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OriginGroupID") + } + mg.Spec.InitProvider.OriginGroupID = reference.ToFloatPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OriginGroupIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cdn/v1alpha1/zz_groupversion_info.go b/apis/cdn/v1alpha1/zz_groupversion_info.go index ca57894..eda484c 100755 --- a/apis/cdn/v1alpha1/zz_groupversion_info.go +++ b/apis/cdn/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/cdn/v1alpha1/zz_origingroup_terraformed.go b/apis/cdn/v1alpha1/zz_origingroup_terraformed.go index 505c7c3..8cdcff3 100755 --- a/apis/cdn/v1alpha1/zz_origingroup_terraformed.go +++ b/apis/cdn/v1alpha1/zz_origingroup_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this OriginGroup func (mg *OriginGroup) GetTerraformResourceType() string { - return "yandex_cdn_origin_group" + return "yandex_cdn_origin_group" } // GetConnectionDetailsMapping for this OriginGroup func (tr *OriginGroup) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this OriginGroup func (tr *OriginGroup) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this OriginGroup func (tr *OriginGroup) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this OriginGroup func (tr *OriginGroup) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this OriginGroup func (tr *OriginGroup) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this OriginGroup func (tr *OriginGroup) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this OriginGroup func (tr *OriginGroup) GetInitParameters() 
(map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this OriginGroup func (tr *OriginGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this OriginGroup using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *OriginGroup) LateInitialize(attrs []byte) (bool, error) { - params := &OriginGroupParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &OriginGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
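--- editor's note (on the call that follows; not part of the generated diff) ---
li.LateInitialize walks spec.forProvider and fills only fields that are still nil from the parameters decoded out of the observed Terraform state, returning true when the spec changed and needs to be persisted. The WithZeroValueJSONOmitEmptyFilter option built just above, keyed on CNameWildcard (every field path), appears to keep zero values out of omitempty-tagged fields during that pass.
--- end note ---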
+ return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *OriginGroup) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/cdn/v1alpha1/zz_origingroup_types.go b/apis/cdn/v1alpha1/zz_origingroup_types.go index f33b90d..fce5b51 100755 --- a/apis/cdn/v1alpha1/zz_origingroup_types.go +++ b/apis/cdn/v1alpha1/zz_origingroup_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,122 +7,104 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type OriginGroupInitParameters struct { + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // CDN Origin Group name used to define device. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// CDN Origin Group name used to define device. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + Origin []OriginInitParameters `json:"origin,omitempty" tf:"origin,omitempty"` -Origin []OriginInitParameters `json:"origin,omitempty" tf:"origin,omitempty"` - -// If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. -UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` + // If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. + UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` } - type OriginGroupObservation struct { + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // CDN Origin Group name used to define device. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// CDN Origin Group name used to define device. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + Origin []OriginObservation `json:"origin,omitempty" tf:"origin,omitempty"` -Origin []OriginObservation `json:"origin,omitempty" tf:"origin,omitempty"` - -// If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. -UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` + // If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. 
+ UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` } - type OriginGroupParameters struct { + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // CDN Origin Group name used to define device. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// CDN Origin Group name used to define device. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // +kubebuilder:validation:Optional + Origin []OriginParameters `json:"origin,omitempty" tf:"origin,omitempty"` -// +kubebuilder:validation:Optional -Origin []OriginParameters `json:"origin,omitempty" tf:"origin,omitempty"` - -// If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. -// +kubebuilder:validation:Optional -UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` + // If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. 
+ // +kubebuilder:validation:Optional + UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` } - type OriginInitParameters struct { + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` - -Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - -Source *string `json:"source,omitempty" tf:"source,omitempty"` + Source *string `json:"source,omitempty" tf:"source,omitempty"` } - type OriginObservation struct { + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"` -Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - -OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"` - -Source *string `json:"source,omitempty" tf:"source,omitempty"` + Source *string `json:"source,omitempty" tf:"source,omitempty"` } - type OriginParameters struct { + // +kubebuilder:validation:Optional + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` -// +kubebuilder:validation:Optional -Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` -// +kubebuilder:validation:Optional -Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` - -// +kubebuilder:validation:Optional -Source *string `json:"source" tf:"source,omitempty"` + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` } // OriginGroupSpec defines the desired state of OriginGroup type OriginGroupSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider OriginGroupParameters `json:"forProvider"` + ForProvider OriginGroupParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -135,20 +115,19 @@ type OriginGroupSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider OriginGroupInitParameters `json:"initProvider,omitempty"` + InitProvider OriginGroupInitParameters `json:"initProvider,omitempty"` } // OriginGroupStatus defines the observed state of OriginGroup. type OriginGroupStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider OriginGroupObservation `json:"atProvider,omitempty"` + AtProvider OriginGroupObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // OriginGroup is the Schema for the OriginGroups API. Allows management of a Yandex.Cloud CDN Origin Groups. 
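--- editor's note (on the markers that follow; not part of the generated diff) ---
The printcolumn markers below add columns such as SYNCED and READY to kubectl get output, and the two CEL XValidation rules on the OriginGroup struct make spec.forProvider.name and spec.forProvider.origin required whenever the management policies include '*', 'Create' or 'Update', accepting the same field from spec.initProvider as a substitute.
--- end note ---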
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -158,10 +137,10 @@ type OriginGroupStatus struct { type OriginGroup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.origin) || (has(self.initProvider) && has(self.initProvider.origin))",message="spec.forProvider.origin is a required parameter" - Spec OriginGroupSpec `json:"spec"` - Status OriginGroupStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.origin) || (has(self.initProvider) && has(self.initProvider.origin))",message="spec.forProvider.origin is a required parameter" + Spec OriginGroupSpec `json:"spec"` + Status OriginGroupStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/cdn/v1alpha1/zz_resource_terraformed.go b/apis/cdn/v1alpha1/zz_resource_terraformed.go index 767b194..77a2734 100755 --- a/apis/cdn/v1alpha1/zz_resource_terraformed.go +++ b/apis/cdn/v1alpha1/zz_resource_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Resource func (mg *Resource) GetTerraformResourceType() string { - return "yandex_cdn_resource" + return "yandex_cdn_resource" } // GetConnectionDetailsMapping for this Resource func (tr *Resource) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Resource func (tr *Resource) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Resource func (tr *Resource) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Resource func (tr *Resource) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Resource func (tr *Resource) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Resource func (tr *Resource) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Resource func (tr *Resource) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this Resource func (tr *Resource) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider.
As it also sets - overwrite to true, we need to set it back to false, we don't - want to overwrite the forProvider fields with the initProvider - fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Resource using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Resource) LateInitialize(attrs []byte) (bool, error) { - params := &ResourceParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &ResourceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Resource) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/cdn/v1alpha1/zz_resource_types.go b/apis/cdn/v1alpha1/zz_resource_types.go index ac24a8e..3f06ced 100755 --- a/apis/cdn/v1alpha1/zz_resource_types.go +++ b/apis/cdn/v1alpha1/zz_resource_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,498 +7,469 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type IPAddressACLInitParameters struct { + // the list of specified IP addresses to be allowed or denied depending on acl policy type. + ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` -// the list of specified IP addresses to be allowed or denied depending on acl policy type. -ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` - -// the policy type for ip_address_acl option, one of "allow" or "deny" values.
-PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + // the policy type for ip_address_acl option, one of "allow" or "deny" values. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` } - type IPAddressACLObservation struct { + // the list of specified IP addresses to be allowed or denied depending on acl policy type. + ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` -// the list of specified IP addresses to be allowed or denied depending on acl policy type. -ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` - -// the policy type for ip_address_acl option, one of "allow" or "deny" values. -PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + // the policy type for ip_address_acl option, one of "allow" or "deny" values. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` } - type IPAddressACLParameters struct { + // the list of specified IP addresses to be allowed or denied depending on acl policy type. + // +kubebuilder:validation:Optional + ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` -// the list of specified IP addresses to be allowed or denied depending on acl policy type. -// +kubebuilder:validation:Optional -ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` - -// the policy type for ip_address_acl option, one of "allow" or "deny" values. -// +kubebuilder:validation:Optional -PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + // the policy type for ip_address_acl option, one of "allow" or "deny" values. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` } - type OptionsInitParameters struct { + // HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response. + AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"` -// HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response. -AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"` + // set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days. + BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"` -// set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. 
The default value is 4 days. -BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"` + // list HTTP headers that must be included in responses to clients. + CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"` -// list HTTP headers that must be included in responses to clients. -CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"` + // parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received. + Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"` -// parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received. -Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"` + // custom value for the Host header. Your server must be able to process requests with the chosen header. + CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"` -// custom value for the Host header. Your server must be able to process requests with the chosen header. -CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"` + // wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only. + CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"` -// wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only. -CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"` + // setup a cache status. + DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"` -// setup a cache status. -DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"` + // disabling proxy force ranges. + DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"` -// disabling proxy force ranges. -DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"` + // content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached. + EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"` -// content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached. -EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"` + // enable access limiting by IP addresses, option available only with setting secure_key. + EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"` -// enable access limiting by IP addresses, option available only with setting secure_key. -EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"` + // option helps you to reduce the bandwidth between origin and CDN servers. 
Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN. + FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"` -// option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN. -FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"` + // choose the Forward Host header option if it is important to send in the request to the Origin the same Host header as was sent in the request to CDN server. + ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"` -// choose the Forward Host header option if is important to send in the request to the Origin the same Host header as was sent in the request to CDN server. -ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"` + // GZip compression at CDN servers reduces file size by 70% and can be as high as 90%. + GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"` -// GZip compression at CDN servers reduces file size by 70% and can be as high as 90%. -GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"` + IPAddressACL []IPAddressACLInitParameters `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"` -IPAddressACL []IPAddressACLInitParameters `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"` + // set for ignoring cookie. + IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"` -// set for ignoring cookie. -IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"` + // files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default. + IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"` -// files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default. -IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"` + // allows caching for GET, HEAD and POST requests. + ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"` -// allows caching for GET, HEAD and POST requests. -ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"` + // files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys. + QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"` -// files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys. -QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"` + // files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key. + QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"` -// files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key.
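--- editor's note (illustrative sketch; not part of the generated diff) ---
ignore_query_params, query_params_blacklist and query_params_whitelist, documented just above, appear to be alternative ways of shaping the cache key, so typically only one of them is set. A hypothetical selection using this file's types (strPtr is an assumed helper):

	// Cache a separate object per value of the "version" parameter; per the
	// whitelist doc comment, all other query parameters share one cache key.
	func exampleCacheKeyOptions() OptionsInitParameters {
		return OptionsInitParameters{
			QueryParamsWhitelist: []*string{strPtr("version")},
		}
	}

	func strPtr(s string) *string { return &s }
--- end note ---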
-QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"`

+	// set up a redirect from HTTPS to HTTP.
+	RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"`
-// set up a redirect from HTTPS to HTTP.
-RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"`

+	// set up a redirect from HTTP to HTTPS.
+	RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"`
-// set up a redirect from HTTP to HTTPS.
-RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"`

+	// set secure key for url encoding to protect content and limit access by IP addresses and time limits.
+	SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"`
-// set secure key for url encoding to protect content and limit access by IP addresses and time limits.
-SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"`

+	// files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests.
+	Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"`
-// files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests.
-Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"`

+	// set up custom headers that CDN servers will send in requests to origins.
+	// +mapType=granular
+	StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"`
-// set up custom headers that CDN servers will send in requests to origins.
-// +mapType=granular
-StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"`
-
-// set up custom headers that CDN servers will send in response to clients.
-// +mapType=granular
-StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"`
+	// set up custom headers that CDN servers will send in response to clients.
+	// +mapType=granular
+	StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"`
 }
-

 type OptionsObservation struct {

+	// HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response.
+	AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"`
-// HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response.
-AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"`

+	// set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days.
+	BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"`
-// set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days.
-BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"`

+	// list HTTP headers that must be included in responses to clients.
+	CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"`
-// list HTTP headers that must be included in responses to clients.
-CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"`

+	// parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received.
+	Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"`
-// parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received.
-Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"`

+	// custom value for the Host header. Your server must be able to process requests with the chosen header.
+	CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"`
-// custom value for the Host header. Your server must be able to process requests with the chosen header.
-CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"`

+	// wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only.
+	CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"`
-// wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only.
-CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"`

+	// setup a cache status.
+	DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"`
-// setup a cache status.
-DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"`

+	// disabling proxy force ranges.
+	DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"`
-// disabling proxy force ranges.
-DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"`

+	// content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached.
+	EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"`
-// content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached.
-EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"`

+	// enable access limiting by IP addresses, option available only with setting secure_key.
+	EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"`
-// enable access limiting by IP addresses, option available only with setting secure_key.
-EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"`

+	// option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN.
+	FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"`
-// option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN.
-FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"`

+	// choose the Forward Host header option if it is important to send in the request to the Origin the same Host header as was sent in the request to CDN server.
+	ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"`
-// choose the Forward Host header option if it is important to send in the request to the Origin the same Host header as was sent in the request to CDN server.
-ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"`

+	// GZip compression at CDN servers reduces file size by 70% and can be as high as 90%.
+	GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"`
-// GZip compression at CDN servers reduces file size by 70% and can be as high as 90%.
-GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"`

+	IPAddressACL []IPAddressACLObservation `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"`
-IPAddressACL []IPAddressACLObservation `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"`

+	// set for ignoring cookie.
+	IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"`
-// set for ignoring cookie.
-IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"`

+	// files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default.
+	IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"`
-// files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default.
-IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"`

+	// allows caching for GET, HEAD and POST requests.
+	ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"`
-// allows caching for GET, HEAD and POST requests.
-ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"`

+	// files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys.
+	QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"`
-// files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys.
-QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"`

+	// files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key.
+	QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"`
-// files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key.
-QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"`

+	// set up a redirect from HTTPS to HTTP.
+	RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"`
-// set up a redirect from HTTPS to HTTP.
-RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"`

+	// set up a redirect from HTTP to HTTPS.
+	RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"`
-// set up a redirect from HTTP to HTTPS.
-RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"`

+	// set secure key for url encoding to protect content and limit access by IP addresses and time limits.
+	SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"`
-// set secure key for url encoding to protect content and limit access by IP addresses and time limits.
-SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"`

+	// files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests.
+	Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"`
-// files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests.
-Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"`

+	// set up custom headers that CDN servers will send in requests to origins.
+	// +mapType=granular
+	StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"`
-// set up custom headers that CDN servers will send in requests to origins.
-// +mapType=granular
-StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"`
-
-// set up custom headers that CDN servers will send in response to clients.
-// +mapType=granular
-StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"`
+	// set up custom headers that CDN servers will send in response to clients.
+	// +mapType=granular
+	StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"`
 }
-

 type OptionsParameters struct {

+	// HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response.
+	// +kubebuilder:validation:Optional
+	AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"`
-// HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response.
-// +kubebuilder:validation:Optional
-AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"`

+	// set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days.
+	// +kubebuilder:validation:Optional
+	BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"`
-// set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days.
-// +kubebuilder:validation:Optional
-BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"`

+	// list HTTP headers that must be included in responses to clients.
+	// +kubebuilder:validation:Optional
+	CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"`
-// list HTTP headers that must be included in responses to clients.
-// +kubebuilder:validation:Optional
-CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"`

+	// parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received.
+	// +kubebuilder:validation:Optional
+	Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"`
-// parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received.
-// +kubebuilder:validation:Optional
-Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"`

+	// custom value for the Host header. Your server must be able to process requests with the chosen header.
+	// +kubebuilder:validation:Optional
+	CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"`
-// custom value for the Host header. Your server must be able to process requests with the chosen header.
-// +kubebuilder:validation:Optional
-CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"`

+	// wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only.
+	// +kubebuilder:validation:Optional
+	CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"`
-// wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only.
-// +kubebuilder:validation:Optional
-CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"`

+	// setup a cache status.
+	// +kubebuilder:validation:Optional
+	DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"`
-// setup a cache status.
-// +kubebuilder:validation:Optional
-DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"`

+	// disabling proxy force ranges.
+	// +kubebuilder:validation:Optional
+	DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"`
-// disabling proxy force ranges.
-// +kubebuilder:validation:Optional
-DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"`

+	// content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached.
+	// +kubebuilder:validation:Optional
+	EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"`
-// content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached.
-// +kubebuilder:validation:Optional
-EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"`

+	// enable access limiting by IP addresses, option available only with setting secure_key.
+	// +kubebuilder:validation:Optional
+	EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"`
-// enable access limiting by IP addresses, option available only with setting secure_key.
-// +kubebuilder:validation:Optional
-EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"`

+	// option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN.
+	// +kubebuilder:validation:Optional
+	FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"`
-// option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN.
-// +kubebuilder:validation:Optional
-FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"`

+	// choose the Forward Host header option if it is important to send in the request to the Origin the same Host header as was sent in the request to CDN server.
+	// +kubebuilder:validation:Optional
+	ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"`
-// choose the Forward Host header option if it is important to send in the request to the Origin the same Host header as was sent in the request to CDN server.
-// +kubebuilder:validation:Optional
-ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"`

+	// GZip compression at CDN servers reduces file size by 70% and can be as high as 90%.
+	// +kubebuilder:validation:Optional
+	GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"`
-// GZip compression at CDN servers reduces file size by 70% and can be as high as 90%.
-// +kubebuilder:validation:Optional
-GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"`

+	// +kubebuilder:validation:Optional
+	IPAddressACL []IPAddressACLParameters `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"`
-// +kubebuilder:validation:Optional
-IPAddressACL []IPAddressACLParameters `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"`

+	// set for ignoring cookie.
+	// +kubebuilder:validation:Optional
+	IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"`
-// set for ignoring cookie.
-// +kubebuilder:validation:Optional
-IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"`

+	// files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default.
+	// +kubebuilder:validation:Optional
+	IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"`
-// files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default.
-// +kubebuilder:validation:Optional
-IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"`

+	// allows caching for GET, HEAD and POST requests.
+	// +kubebuilder:validation:Optional
+	ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"`
-// allows caching for GET, HEAD and POST requests.
-// +kubebuilder:validation:Optional
-ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"`

+	// files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys.
+	// +kubebuilder:validation:Optional
+	QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"`
-// files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys.
-// +kubebuilder:validation:Optional
-QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"`

+	// files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key.
+	// +kubebuilder:validation:Optional
+	QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"`
-// files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key.
-// +kubebuilder:validation:Optional
-QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"`

+	// set up a redirect from HTTPS to HTTP.
+	// +kubebuilder:validation:Optional
+	RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"`
-// set up a redirect from HTTPS to HTTP.
-// +kubebuilder:validation:Optional
-RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"`

+	// set up a redirect from HTTP to HTTPS.
+	// +kubebuilder:validation:Optional
+	RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"`
-// set up a redirect from HTTP to HTTPS.
-// +kubebuilder:validation:Optional
-RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"`

+	// set secure key for url encoding to protect content and limit access by IP addresses and time limits.
+	// +kubebuilder:validation:Optional
+	SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"`
-// set secure key for url encoding to protect content and limit access by IP addresses and time limits.
-// +kubebuilder:validation:Optional
-SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"`

+	// files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests.
+	// +kubebuilder:validation:Optional
+	Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"`
-// files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests.
-// +kubebuilder:validation:Optional
-Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"`

+	// set up custom headers that CDN servers will send in requests to origins.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"`
-// set up custom headers that CDN servers will send in requests to origins.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"`
-
-// set up custom headers that CDN servers will send in response to clients.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"`
+	// set up custom headers that CDN servers will send in response to clients.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"`
 }
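Editor's note: the Options structs above are plain data carriers, so every scalar is a pointer that can be left nil. A minimal sketch of populating OptionsParameters, assuming it sits in the same package as the generated types; the ptr helper and all values are hypothetical, not part of the generated code:

// ptr is a hypothetical helper for taking the address of a literal (requires Go 1.18+).
func ptr[T any](v T) *T { return &v }

var exampleOptions = OptionsParameters{
	EdgeCacheSettings: ptr(345600.0), // assumed to be seconds; matches the documented 4-day default
	GzipOn:            ptr(true),
	IgnoreQueryParams: ptr(true),
	Slice:             ptr(true), // per the comment above, the origin must support HTTP Range requests
}

Leaving a field nil omits it from the rendered Terraform configuration thanks to the omitempty tags.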
-

 type ResourceInitParameters struct {

+	// Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients.
+	Active *bool `json:"active,omitempty" tf:"active,omitempty"`
-// Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients.
-Active *bool `json:"active,omitempty" tf:"active,omitempty"`

+	// CDN endpoint CNAME, must be unique among resources.
+	Cname *string `json:"cname,omitempty" tf:"cname,omitempty"`
-// CDN endpoint CNAME, must be unique among resources.
-Cname *string `json:"cname,omitempty" tf:"cname,omitempty"`

+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`

+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`

+	// CDN Resource settings and options to tune CDN edge behavior.
+	Options []OptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"`
-// CDN Resource settings and options to tune CDN edge behavior.
-Options []OptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"`

+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1.OriginGroup
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"`
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1.OriginGroup
-// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
-OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"`

+	// Reference to a OriginGroup in cdn to populate originGroupId.
+	// +kubebuilder:validation:Optional
+	OriginGroupIDRef *v1.Reference `json:"originGroupIdRef,omitempty" tf:"-"`
-// Reference to a OriginGroup in cdn to populate originGroupId.
-// +kubebuilder:validation:Optional
-OriginGroupIDRef *v1.Reference `json:"originGroupIdRef,omitempty" tf:"-"`

+	// Selector for a OriginGroup in cdn to populate originGroupId.
+	// +kubebuilder:validation:Optional
+	OriginGroupIDSelector *v1.Selector `json:"originGroupIdSelector,omitempty" tf:"-"`
-// Selector for a OriginGroup in cdn to populate originGroupId.
-// +kubebuilder:validation:Optional
-OriginGroupIDSelector *v1.Selector `json:"originGroupIdSelector,omitempty" tf:"-"`

+	OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"`
-OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"`

+	OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"`
-OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"`

+	// SSL certificate of CDN resource.
+	SSLCertificate []SSLCertificateInitParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"`
-// SSL certificate of CDN resource.
-SSLCertificate []SSLCertificateInitParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"`

+	// list of secondary hostname strings.
+	// +listType=set
+	SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"`
-// list of secondary hostname strings.
-// +listType=set
-SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"`
-
-UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+	UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
 }
-

 type ResourceObservation struct {

+	// Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients.
+	Active *bool `json:"active,omitempty" tf:"active,omitempty"`
-// Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients.
-Active *bool `json:"active,omitempty" tf:"active,omitempty"`

+	// CDN endpoint CNAME, must be unique among resources.
+	Cname *string `json:"cname,omitempty" tf:"cname,omitempty"`
-// CDN endpoint CNAME, must be unique among resources.
-Cname *string `json:"cname,omitempty" tf:"cname,omitempty"`

+	// Creation timestamp of the IoT Core Device
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-// Creation timestamp of the IoT Core Device
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`

+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-ID *string `json:"id,omitempty" tf:"id,omitempty"`

+	// CDN Resource settings and options to tune CDN edge behavior.
+	Options []OptionsObservation `json:"options,omitempty" tf:"options,omitempty"`
-// CDN Resource settings and options to tune CDN edge behavior.
-Options []OptionsObservation `json:"options,omitempty" tf:"options,omitempty"`

+	OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"`
-OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"`

+	OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"`
-OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"`

+	OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"`
-OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"`

+	// provider CNAME of CDN resource, computed value for read and update operations.
+	ProviderCname *string `json:"providerCname,omitempty" tf:"provider_cname,omitempty"`
-// provider CNAME of CDN resource, computed value for read and update operations.
-ProviderCname *string `json:"providerCname,omitempty" tf:"provider_cname,omitempty"`

+	// SSL certificate of CDN resource.
+	SSLCertificate []SSLCertificateObservation `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"`
-// SSL certificate of CDN resource.
-SSLCertificate []SSLCertificateObservation `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"`

+	// list of secondary hostname strings.
+	// +listType=set
+	SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"`
-// list of secondary hostname strings.
-// +listType=set
-SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"`
-
-UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+	UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
 }
-

 type ResourceParameters struct {

+	// Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients.
+	// +kubebuilder:validation:Optional
+	Active *bool `json:"active,omitempty" tf:"active,omitempty"`
-// Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients.
-// +kubebuilder:validation:Optional
-Active *bool `json:"active,omitempty" tf:"active,omitempty"`

+	// CDN endpoint CNAME, must be unique among resources.
+	// +kubebuilder:validation:Optional
+	Cname *string `json:"cname,omitempty" tf:"cname,omitempty"`
-// CDN endpoint CNAME, must be unique among resources.
-// +kubebuilder:validation:Optional
-Cname *string `json:"cname,omitempty" tf:"cname,omitempty"`

+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`

+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`

+	// CDN Resource settings and options to tune CDN edge behavior.
+	// +kubebuilder:validation:Optional
+	Options []OptionsParameters `json:"options,omitempty" tf:"options,omitempty"`
-// CDN Resource settings and options to tune CDN edge behavior.
-// +kubebuilder:validation:Optional
-Options []OptionsParameters `json:"options,omitempty" tf:"options,omitempty"`

+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1.OriginGroup
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	// +kubebuilder:validation:Optional
+	OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"`
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1.OriginGroup
-// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
-// +kubebuilder:validation:Optional
-OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"`

+	// Reference to a OriginGroup in cdn to populate originGroupId.
+	// +kubebuilder:validation:Optional
+	OriginGroupIDRef *v1.Reference `json:"originGroupIdRef,omitempty" tf:"-"`
-// Reference to a OriginGroup in cdn to populate originGroupId.
-// +kubebuilder:validation:Optional
-OriginGroupIDRef *v1.Reference `json:"originGroupIdRef,omitempty" tf:"-"`

+	// Selector for a OriginGroup in cdn to populate originGroupId.
+	// +kubebuilder:validation:Optional
+	OriginGroupIDSelector *v1.Selector `json:"originGroupIdSelector,omitempty" tf:"-"`
-// Selector for a OriginGroup in cdn to populate originGroupId.
-// +kubebuilder:validation:Optional
-OriginGroupIDSelector *v1.Selector `json:"originGroupIdSelector,omitempty" tf:"-"`

+	// +kubebuilder:validation:Optional
+	OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"`
-// +kubebuilder:validation:Optional
-OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"`

+	// +kubebuilder:validation:Optional
+	OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"`
-// +kubebuilder:validation:Optional
-OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"`

+	// SSL certificate of CDN resource.
+	// +kubebuilder:validation:Optional
+	SSLCertificate []SSLCertificateParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"`
-// SSL certificate of CDN resource.
-// +kubebuilder:validation:Optional
-SSLCertificate []SSLCertificateParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"`

+	// list of secondary hostname strings.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"`
-// list of secondary hostname strings.
-// +kubebuilder:validation:Optional
-// +listType=set
-SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"`
-
-// +kubebuilder:validation:Optional
-UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+	// +kubebuilder:validation:Optional
+	UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
 }
-

 type SSLCertificateInitParameters struct {
+	CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"`
-
-CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"`
-
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-

 type SSLCertificateObservation struct {
+	CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"`
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
-CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"`
-
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
-
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-

 type SSLCertificateParameters struct {
+	// +kubebuilder:validation:Optional
+	CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"`
-// +kubebuilder:validation:Optional
-CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"`
-
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
 }

 // ResourceSpec defines the desired state of Resource
 type ResourceSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider ResourceParameters `json:"forProvider"`
+	ForProvider ResourceParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -511,20 +480,19 @@ type ResourceSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider ResourceInitParameters `json:"initProvider,omitempty"`
+	InitProvider ResourceInitParameters `json:"initProvider,omitempty"`
 }

 // ResourceStatus defines the observed state of Resource.
 type ResourceStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider ResourceObservation `json:"atProvider,omitempty"`
+	AtProvider ResourceObservation `json:"atProvider,omitempty"`
 }

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // Resource is the Schema for the Resources API. Allows management of a Yandex.Cloud CDN Resource.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
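Editor's note: the BETA-field comment in ResourceSpec above describes the forProvider/initProvider split: initProvider values are merged in at creation and then ignored on updates. A hypothetical sketch of how the two are used together (ptr as in the earlier note; values are illustrative only):

var exampleSpec = ResourceSpec{
	ForProvider: ResourceParameters{
		Cname: ptr("cdn.example.com"), // reconciled on every update
	},
	InitProvider: ResourceInitParameters{
		Active: ptr(true), // honored at creation, afterwards left to an external controller
	},
}

The embedded v1.ResourceSpec is left at its zero value here; in a real manifest it carries provider config and management policies.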
diff --git a/apis/cm/v1alpha1/zz_certificate_terraformed.go b/apis/cm/v1alpha1/zz_certificate_terraformed.go
new file mode 100755
index 0000000..791640b
--- /dev/null
+++ b/apis/cm/v1alpha1/zz_certificate_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Certificate
+func (mg *Certificate) GetTerraformResourceType() string {
+	return "yandex_cm_certificate"
+}
+
+// GetConnectionDetailsMapping for this Certificate
+func (tr *Certificate) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"self_managed[*].private_key": "selfManaged[*].privateKeySecretRef"}
+}
+
+// GetObservation of this Certificate
+func (tr *Certificate) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Certificate
+func (tr *Certificate) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Certificate
+func (tr *Certificate) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Certificate
+func (tr *Certificate) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Certificate
+func (tr *Certificate) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Certificate
+func (tr *Certificate) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Certificate
+func (tr *Certificate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Certificate using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Certificate) LateInitialize(attrs []byte) (bool, error) {
+	params := &CertificateParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Certificate) GetTerraformSchemaVersion() int {
+	return 1
+}
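Editor's note: GetMergedParameters above hinges on the Note(lsviben) comment: mergo.WithSliceDeepCopy also turns on overwriting, so the extra config function flips Overwrite back to false, which makes forProvider win wherever both maps set a key. A standalone sketch of that merge behavior, assuming dario.cat/mergo is available (package main and the sample maps are hypothetical):

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	forProvider := map[string]any{"name": "from-forProvider"}
	initProvider := map[string]any{"name": "from-initProvider", "labels": map[string]any{"env": "dev"}}

	// Same option set as GetMergedParameters: deep-copy slices, but do not
	// let initProvider overwrite values already present in forProvider.
	if err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	fmt.Println(forProvider) // map[labels:map[env:dev] name:from-forProvider]
}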
diff --git a/apis/cm/v1alpha1/zz_certificate_types.go b/apis/cm/v1alpha1/zz_certificate_types.go
new file mode 100755
index 0000000..d11d0e4
--- /dev/null
+++ b/apis/cm/v1alpha1/zz_certificate_types.go
@@ -0,0 +1,347 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type CertificateInitParameters struct {
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Certificate description.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Domains for this certificate. Should be specified for managed certificates.
+	Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"`
+
+	// Folder that the resource belongs to. If value is omitted, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels to assign to this certificate.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Managed specification. Structure is documented below.
+	Managed []ManagedInitParameters `json:"managed,omitempty" tf:"managed,omitempty"`
+
+	// Certificate name.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Self-managed specification. Structure is documented below.
+	SelfManaged []SelfManagedInitParameters `json:"selfManaged,omitempty" tf:"self_managed,omitempty"`
+}
+
+type CertificateObservation struct {
+
+	// Array of challenges. Structure is documented below.
+	Challenges []ChallengesObservation `json:"challenges,omitempty" tf:"challenges,omitempty"`
+
+	// Certificate create timestamp.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Certificate description.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Domains for this certificate. Should be specified for managed certificates.
+	Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"`
+
+	// Folder that the resource belongs to. If value is omitted, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Certificate Id.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Certificate issue timestamp.
+	IssuedAt *string `json:"issuedAt,omitempty" tf:"issued_at,omitempty"`
+
+	// Certificate issuer.
+	Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"`
+
+	// Labels to assign to this certificate.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Managed specification. Structure is documented below.
+	Managed []ManagedObservation `json:"managed,omitempty" tf:"managed,omitempty"`
+
+	// Certificate name.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Certificate end valid period.
+	NotAfter *string `json:"notAfter,omitempty" tf:"not_after,omitempty"`
+
+	// Certificate start valid period.
+	NotBefore *string `json:"notBefore,omitempty" tf:"not_before,omitempty"`
+
+	// Self-managed specification. Structure is documented below.
+	SelfManaged []SelfManagedObservation `json:"selfManaged,omitempty" tf:"self_managed,omitempty"`
+
+	// Certificate serial number.
+	Serial *string `json:"serial,omitempty" tf:"serial,omitempty"`
+
+	// Certificate status: "VALIDATING", "INVALID", "ISSUED", "REVOKED", "RENEWING" or "RENEWAL_FAILED".
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+	// Certificate subject.
+	Subject *string `json:"subject,omitempty" tf:"subject,omitempty"`
+
+	// Certificate type: "MANAGED" or "IMPORTED".
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+
+	// Certificate update timestamp.
+	UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+}
+
+type CertificateParameters struct {
+
+	// +kubebuilder:validation:Optional
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Certificate description.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Domains for this certificate. Should be specified for managed certificates.
+	// +kubebuilder:validation:Optional
+	Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"`
+
+	// Folder that the resource belongs to. If value is omitted, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels to assign to this certificate.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Managed specification. Structure is documented below.
+	// +kubebuilder:validation:Optional
+	Managed []ManagedParameters `json:"managed,omitempty" tf:"managed,omitempty"`
+
+	// Certificate name.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Self-managed specification. Structure is documented below.
+	// +kubebuilder:validation:Optional
+	SelfManaged []SelfManagedParameters `json:"selfManaged,omitempty" tf:"self_managed,omitempty"`
+}
+
+type ChallengesInitParameters struct {
+}
+
+type ChallengesObservation struct {
+
+	// Time the challenge was created.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// DNS record name (only for DNS challenge).
+	DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"`
+
+	// DNS record type: "TXT" or "CNAME" (only for DNS challenge).
+	DNSType *string `json:"dnsType,omitempty" tf:"dns_type,omitempty"`
+
+	// DNS record value (only for DNS challenge).
+	DNSValue *string `json:"dnsValue,omitempty" tf:"dns_value,omitempty"`
+
+	// Validated domain.
+	Domain *string `json:"domain,omitempty" tf:"domain,omitempty"`
+
+	// The content that should be made accessible with the given http_url (only for HTTP challenge).
+	HTTPContent *string `json:"httpContent,omitempty" tf:"http_content,omitempty"`
+
+	// URL where the challenge content http_content should be placed (only for HTTP challenge).
+	HTTPURL *string `json:"httpUrl,omitempty" tf:"http_url,omitempty"`
+
+	// Current status message.
+	Message *string `json:"message,omitempty" tf:"message,omitempty"`
+
+	// Challenge type "DNS" or "HTTP".
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+
+	// Last time the challenge was updated.
+	UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+}
+
+type ChallengesParameters struct {
+}
+
+type ManagedInitParameters struct {
+
+	// Expected number of challenge count needed to validate certificate. Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates.
+	ChallengeCount *float64 `json:"challengeCount,omitempty" tf:"challenge_count,omitempty"`
+
+	// Domain owner-check method. Possible values:
+	ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"`
+}
+
+type ManagedObservation struct {
+
+	// Expected number of challenge count needed to validate certificate. Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates.
+	ChallengeCount *float64 `json:"challengeCount,omitempty" tf:"challenge_count,omitempty"`
+
+	// Domain owner-check method. Possible values:
+	ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"`
+}
+
+type ManagedParameters struct {
+
+	// Expected number of challenge count needed to validate certificate. Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates.
+	// +kubebuilder:validation:Optional
+	ChallengeCount *float64 `json:"challengeCount,omitempty" tf:"challenge_count,omitempty"`
+
+	// Domain owner-check method. Possible values:
+	// +kubebuilder:validation:Optional
+	ChallengeType *string `json:"challengeType" tf:"challenge_type,omitempty"`
+}
+
+type PrivateKeyLockboxSecretInitParameters struct {
+
+	// Lockbox secret Id.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Key of the Lockbox secret, the value of which contains the private key of the certificate.
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
+}
+
+type PrivateKeyLockboxSecretObservation struct {
+
+	// Lockbox secret Id.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Key of the Lockbox secret, the value of which contains the private key of the certificate.
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
+}
+
+type PrivateKeyLockboxSecretParameters struct {
+
+	// Lockbox secret Id.
+	// +kubebuilder:validation:Optional
+	ID *string `json:"id" tf:"id,omitempty"`
+
+	// Key of the Lockbox secret, the value of which contains the private key of the certificate.
+	// +kubebuilder:validation:Optional
+	Key *string `json:"key" tf:"key,omitempty"`
+}
+
+type SelfManagedInitParameters struct {
+
+	// Certificate with chain.
+	Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"`
+
+	// Lockbox secret specification for getting private key. Structure is documented below.
+	PrivateKeyLockboxSecret []PrivateKeyLockboxSecretInitParameters `json:"privateKeyLockboxSecret,omitempty" tf:"private_key_lockbox_secret,omitempty"`
+
+	// Private key of certificate.
+	PrivateKeySecretRef *v1.SecretKeySelector `json:"privateKeySecretRef,omitempty" tf:"-"`
+}
+
+type SelfManagedObservation struct {
+
+	// Certificate with chain.
+	Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"`
+
+	// Lockbox secret specification for getting private key. Structure is documented below.
+	PrivateKeyLockboxSecret []PrivateKeyLockboxSecretObservation `json:"privateKeyLockboxSecret,omitempty" tf:"private_key_lockbox_secret,omitempty"`
+}
+
+type SelfManagedParameters struct {
+
+	// Certificate with chain.
+	// +kubebuilder:validation:Optional
+	Certificate *string `json:"certificate" tf:"certificate,omitempty"`
+
+	// Lockbox secret specification for getting private key. Structure is documented below.
+	// +kubebuilder:validation:Optional
+	PrivateKeyLockboxSecret []PrivateKeyLockboxSecretParameters `json:"privateKeyLockboxSecret,omitempty" tf:"private_key_lockbox_secret,omitempty"`
+
+	// Private key of certificate.
+ // +kubebuilder:validation:Optional + PrivateKeySecretRef *v1.SecretKeySelector `json:"privateKeySecretRef,omitempty" tf:"-"` +} + +// CertificateSpec defines the desired state of Certificate +type CertificateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CertificateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider CertificateInitParameters `json:"initProvider,omitempty"` +} + +// CertificateStatus defines the observed state of Certificate. +type CertificateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CertificateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Certificate is the Schema for the Certificates API. A TLS certificate signed by a certification authority confirming that it belongs to the owner of the domain name. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Certificate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec CertificateSpec `json:"spec"` + Status CertificateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CertificateList contains a list of Certificates +type CertificateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Certificate `json:"items"` +} + +// Repository type metadata. +var ( + Certificate_Kind = "Certificate" + Certificate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Certificate_Kind}.String() + Certificate_KindAPIVersion = Certificate_Kind + "." + CRDGroupVersion.String() + Certificate_GroupVersionKind = CRDGroupVersion.WithKind(Certificate_Kind) +) + +func init() { + SchemeBuilder.Register(&Certificate{}, &CertificateList{}) +}
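(Editor's aside, not part of the generated diff: a minimal, hedged sketch of constructing the Certificate type defined above from Go. The import alias, the example.com domain, and the DNS_CNAME challenge type are illustrative assumptions, not values confirmed by this diff.)

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cmv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cm/v1alpha1"
)

// ptr is a small helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	// The CEL rule on the Certificate kind requires spec.forProvider.name
	// (or its initProvider counterpart) whenever Create/Update management
	// policies are in effect.
	cert := &cmv1alpha1.Certificate{
		ObjectMeta: metav1.ObjectMeta{Name: "example-cert"},
		Spec: cmv1alpha1.CertificateSpec{
			ForProvider: cmv1alpha1.CertificateParameters{
				Name:    ptr("example-cert"),
				Domains: []*string{ptr("example.com")}, // hypothetical domain
				Managed: []cmv1alpha1.ManagedParameters{{
					ChallengeType:  ptr("DNS_CNAME"), // assumed challenge type
					ChallengeCount: ptr(float64(1)),
				}},
			},
		},
	}
	fmt.Println(*cert.Spec.ForProvider.Name, *cert.Spec.ForProvider.Managed[0].ChallengeType)
}

diff --git a/apis/cm/v1alpha1/zz_generated.conversion_hubs.go b/apis/cm/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..8006de9 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT.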
+ +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Certificate) Hub() {} diff --git a/apis/cm/v1alpha1/zz_generated.deepcopy.go b/apis/cm/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b05de38 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,750 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. +func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Certificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = make([]ManagedInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelfManaged != nil { + in, out := &in.SelfManaged, &out.SelfManaged + *out = make([]SelfManagedInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. 
+func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateList) DeepCopyInto(out *CertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Certificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateList. +func (in *CertificateList) DeepCopy() *CertificateList { + if in == nil { + return nil + } + out := new(CertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.Challenges != nil { + in, out := &in.Challenges, &out.Challenges + *out = make([]ChallengesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IssuedAt != nil { + in, out := &in.IssuedAt, &out.IssuedAt + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = make([]ManagedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = new(string) + **out = **in + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = new(string) + **out = **in + } + if in.SelfManaged != nil { + in, out := &in.SelfManaged, &out.SelfManaged + *out = make([]SelfManagedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Serial != 
nil { + in, out := &in.Serial, &out.Serial + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = make([]ManagedParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelfManaged != nil { + in, out := &in.SelfManaged, &out.SelfManaged + *out = make([]SelfManagedParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. +func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec. 
+func (in *CertificateSpec) DeepCopy() *CertificateSpec { + if in == nil { + return nil + } + out := new(CertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateStatus. +func (in *CertificateStatus) DeepCopy() *CertificateStatus { + if in == nil { + return nil + } + out := new(CertificateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengesInitParameters) DeepCopyInto(out *ChallengesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengesInitParameters. +func (in *ChallengesInitParameters) DeepCopy() *ChallengesInitParameters { + if in == nil { + return nil + } + out := new(ChallengesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengesObservation) DeepCopyInto(out *ChallengesObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DNSType != nil { + in, out := &in.DNSType, &out.DNSType + *out = new(string) + **out = **in + } + if in.DNSValue != nil { + in, out := &in.DNSValue, &out.DNSValue + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.HTTPContent != nil { + in, out := &in.HTTPContent, &out.HTTPContent + *out = new(string) + **out = **in + } + if in.HTTPURL != nil { + in, out := &in.HTTPURL, &out.HTTPURL + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengesObservation. +func (in *ChallengesObservation) DeepCopy() *ChallengesObservation { + if in == nil { + return nil + } + out := new(ChallengesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengesParameters) DeepCopyInto(out *ChallengesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengesParameters. +func (in *ChallengesParameters) DeepCopy() *ChallengesParameters { + if in == nil { + return nil + } + out := new(ChallengesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedInitParameters) DeepCopyInto(out *ManagedInitParameters) { + *out = *in + if in.ChallengeCount != nil { + in, out := &in.ChallengeCount, &out.ChallengeCount + *out = new(float64) + **out = **in + } + if in.ChallengeType != nil { + in, out := &in.ChallengeType, &out.ChallengeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedInitParameters. +func (in *ManagedInitParameters) DeepCopy() *ManagedInitParameters { + if in == nil { + return nil + } + out := new(ManagedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedObservation) DeepCopyInto(out *ManagedObservation) { + *out = *in + if in.ChallengeCount != nil { + in, out := &in.ChallengeCount, &out.ChallengeCount + *out = new(float64) + **out = **in + } + if in.ChallengeType != nil { + in, out := &in.ChallengeType, &out.ChallengeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedObservation. +func (in *ManagedObservation) DeepCopy() *ManagedObservation { + if in == nil { + return nil + } + out := new(ManagedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedParameters) DeepCopyInto(out *ManagedParameters) { + *out = *in + if in.ChallengeCount != nil { + in, out := &in.ChallengeCount, &out.ChallengeCount + *out = new(float64) + **out = **in + } + if in.ChallengeType != nil { + in, out := &in.ChallengeType, &out.ChallengeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedParameters. +func (in *ManagedParameters) DeepCopy() *ManagedParameters { + if in == nil { + return nil + } + out := new(ManagedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateKeyLockboxSecretInitParameters) DeepCopyInto(out *PrivateKeyLockboxSecretInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyLockboxSecretInitParameters. +func (in *PrivateKeyLockboxSecretInitParameters) DeepCopy() *PrivateKeyLockboxSecretInitParameters { + if in == nil { + return nil + } + out := new(PrivateKeyLockboxSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateKeyLockboxSecretObservation) DeepCopyInto(out *PrivateKeyLockboxSecretObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyLockboxSecretObservation. 
+func (in *PrivateKeyLockboxSecretObservation) DeepCopy() *PrivateKeyLockboxSecretObservation { + if in == nil { + return nil + } + out := new(PrivateKeyLockboxSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateKeyLockboxSecretParameters) DeepCopyInto(out *PrivateKeyLockboxSecretParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyLockboxSecretParameters. +func (in *PrivateKeyLockboxSecretParameters) DeepCopy() *PrivateKeyLockboxSecretParameters { + if in == nil { + return nil + } + out := new(PrivateKeyLockboxSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedInitParameters) DeepCopyInto(out *SelfManagedInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.PrivateKeyLockboxSecret != nil { + in, out := &in.PrivateKeyLockboxSecret, &out.PrivateKeyLockboxSecret + *out = make([]PrivateKeyLockboxSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateKeySecretRef != nil { + in, out := &in.PrivateKeySecretRef, &out.PrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedInitParameters. +func (in *SelfManagedInitParameters) DeepCopy() *SelfManagedInitParameters { + if in == nil { + return nil + } + out := new(SelfManagedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedObservation) DeepCopyInto(out *SelfManagedObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.PrivateKeyLockboxSecret != nil { + in, out := &in.PrivateKeyLockboxSecret, &out.PrivateKeyLockboxSecret + *out = make([]PrivateKeyLockboxSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedObservation. +func (in *SelfManagedObservation) DeepCopy() *SelfManagedObservation { + if in == nil { + return nil + } + out := new(SelfManagedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedParameters) DeepCopyInto(out *SelfManagedParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.PrivateKeyLockboxSecret != nil { + in, out := &in.PrivateKeyLockboxSecret, &out.PrivateKeyLockboxSecret + *out = make([]PrivateKeyLockboxSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateKeySecretRef != nil { + in, out := &in.PrivateKeySecretRef, &out.PrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedParameters. +func (in *SelfManagedParameters) DeepCopy() *SelfManagedParameters { + if in == nil { + return nil + } + out := new(SelfManagedParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cm/v1alpha1/zz_generated.resolvers.go b/apis/cm/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..5dcddb6 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Certificate. +func (mg *Certificate) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cm/v1alpha1/zz_groupversion_info.go b/apis/cm/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..bd1932e --- /dev/null +++ b/apis/cm/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cm.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cm.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/compute/v1alpha1/zz_disk_terraformed.go b/apis/compute/v1alpha1/zz_disk_terraformed.go new file mode 100755 index 0000000..63ff4d2 --- /dev/null +++ b/apis/compute/v1alpha1/zz_disk_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Disk +func (mg *Disk) GetTerraformResourceType() string { + return "yandex_compute_disk" +} + +// GetConnectionDetailsMapping for this Disk +func (tr *Disk) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Disk +func (tr *Disk) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Disk +func (tr *Disk) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Disk +func (tr *Disk) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Disk +func (tr *Disk) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Disk +func (tr *Disk) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Disk +func (tr *Disk) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Disk +func (tr *Disk) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false; we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
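+	// Editor's illustration (assumption, not generated code): with
+	//   params     = map[string]any{"name": "a", "labels": map[string]any{"k1": "v1"}}
+	//   initParams = map[string]any{"size": float64(20), "labels": map[string]any{"k2": "v2"}}
+	// the merge below keeps params["name"] == "a", adds params["size"], and
+	// merges the keys of the nested labels map, because Overwrite is reset
+	// to false before mergo walks the maps.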
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Disk using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Disk) LateInitialize(attrs []byte) (bool, error) { + params := &DiskParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Disk) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_disk_types.go b/apis/compute/v1alpha1/zz_disk_types.go new file mode 100755 index 0000000..84256d2 --- /dev/null +++ b/apis/compute/v1alpha1/zz_disk_types.go @@ -0,0 +1,364 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskInitParameters struct { + + // Default is 5 minutes. + AllowRecreate *bool `json:"allowRecreate,omitempty" tf:"allow_recreate,omitempty"` + + // Block size of the disk, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the disk. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Disk placement policy configuration. The structure is documented below. + DiskPlacementPolicy []DiskPlacementPolicyInitParameters `json:"diskPlacementPolicy,omitempty" tf:"disk_placement_policy,omitempty"` + + // The ID of the folder that the disk belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Hardware generation and its features, + // which will be applied to the instance when this disk is used as a boot + // disk. Provide this property if you wish to override this value, which + // otherwise is inherited from the source. The structure is documented below. + HardwareGeneration []HardwareGenerationInitParameters `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"` + + // The source image to use for disk creation. + // +crossplane:generate:reference:type=Image + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Reference to a Image to populate imageId. 
+ // +kubebuilder:validation:Optional + ImageIDRef *v1.Reference `json:"imageIdRef,omitempty" tf:"-"` + + // Selector for an Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDSelector *v1.Selector `json:"imageIdSelector,omitempty" tf:"-"` + + // Labels to assign to this disk. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the disk. Provide this property when you create a resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the image_id or snapshot_id parameter, or specify it alone to create an empty persistent disk. If you specify this field along with image_id or snapshot_id, the size value must not be less than the size of the source image or the size of the snapshot. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The source snapshot to use for disk creation. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Type of disk to create. Provide this when creating a disk. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Availability zone where the disk will reside. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type DiskObservation struct { + + // Default is 5 minutes. + AllowRecreate *bool `json:"allowRecreate,omitempty" tf:"allow_recreate,omitempty"` + + // Block size of the disk, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Creation timestamp of the disk. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the disk. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Disk placement policy configuration. The structure is documented below. + DiskPlacementPolicy []DiskPlacementPolicyObservation `json:"diskPlacementPolicy,omitempty" tf:"disk_placement_policy,omitempty"` + + // The ID of the folder that the disk belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Hardware generation and its features, + // which will be applied to the instance when this disk is used as a boot + // disk. Provide this property if you wish to override this value, which + // otherwise is inherited from the source. The structure is documented below. + HardwareGeneration []HardwareGenerationObservation `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The source image to use for disk creation. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Labels to assign to this disk. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the disk. Provide this property when you create a resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + ProductIds []*string `json:"productIds,omitempty" tf:"product_ids,omitempty"` + + // Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the image_id or snapshot_id parameter, or specify it alone to create an empty persistent disk.
If you specify this field along with image_id or snapshot_id, the size value must not be less than the size of the source image or the size of the snapshot. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The source snapshot to use for disk creation. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The status of the disk. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Type of disk to create. Provide this when creating a disk. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Availability zone where the disk will reside. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type DiskParameters struct { + + // Default is 5 minutes. + // +kubebuilder:validation:Optional + AllowRecreate *bool `json:"allowRecreate,omitempty" tf:"allow_recreate,omitempty"` + + // Block size of the disk, specified in bytes. + // +kubebuilder:validation:Optional + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the disk. Provide this property when you create a resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Disk placement policy configuration. The structure is documented below. + // +kubebuilder:validation:Optional + DiskPlacementPolicy []DiskPlacementPolicyParameters `json:"diskPlacementPolicy,omitempty" tf:"disk_placement_policy,omitempty"` + + // The ID of the folder that the disk belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Hardware generation and its features, + // which will be applied to the instance when this disk is used as a boot + // disk. Provide this property if you wish to override this value, which + // otherwise is inherited from the source. The structure is documented below. + // +kubebuilder:validation:Optional + HardwareGeneration []HardwareGenerationParameters `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"` + + // The source image to use for disk creation. + // +crossplane:generate:reference:type=Image + // +kubebuilder:validation:Optional + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Reference to an Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDRef *v1.Reference `json:"imageIdRef,omitempty" tf:"-"` + + // Selector for an Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDSelector *v1.Selector `json:"imageIdSelector,omitempty" tf:"-"` + + // Labels to assign to this disk. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the disk. Provide this property when you create a resource.
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the image_id or snapshot_id parameter, or specify it alone to create an empty persistent disk. If you specify this field along with image_id or snapshot_id, the size value must not be less than the size of the source image or the size of the snapshot. + // +kubebuilder:validation:Optional + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The source snapshot to use for disk creation. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Type of disk to create. Provide this when creating a disk. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Availability zone where the disk will reside. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type DiskPlacementPolicyInitParameters struct { + + // Specifies Disk Placement Group id. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1.DiskPlacementGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + // Reference to a DiskPlacementGroup in compute to populate diskPlacementGroupId. + // +kubebuilder:validation:Optional + DiskPlacementGroupIDRef *v1.Reference `json:"diskPlacementGroupIdRef,omitempty" tf:"-"` + + // Selector for a DiskPlacementGroup in compute to populate diskPlacementGroupId. + // +kubebuilder:validation:Optional + DiskPlacementGroupIDSelector *v1.Selector `json:"diskPlacementGroupIdSelector,omitempty" tf:"-"` +} + +type DiskPlacementPolicyObservation struct { + + // Specifies Disk Placement Group id. + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` +} + +type DiskPlacementPolicyParameters struct { + + // Specifies Disk Placement Group id. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1.DiskPlacementGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + // Reference to a DiskPlacementGroup in compute to populate diskPlacementGroupId. + // +kubebuilder:validation:Optional + DiskPlacementGroupIDRef *v1.Reference `json:"diskPlacementGroupIdRef,omitempty" tf:"-"` + + // Selector for a DiskPlacementGroup in compute to populate diskPlacementGroupId. + // +kubebuilder:validation:Optional + DiskPlacementGroupIDSelector *v1.Selector `json:"diskPlacementGroupIdSelector,omitempty" tf:"-"` +} + +type Generation2FeaturesInitParameters struct { +} + +type Generation2FeaturesObservation struct { +} + +type Generation2FeaturesParameters struct { +} + +type HardwareGenerationInitParameters struct { + + // A newer hardware generation, which always uses PCI_TOPOLOGY_V2 and UEFI boot. 
+ Generation2Features []Generation2FeaturesInitParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"` + + // Defines the first known hardware generation and its features, which are: + LegacyFeatures []LegacyFeaturesInitParameters `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"` +} + +type HardwareGenerationObservation struct { + + // A newer hardware generation, which always uses PCI_TOPOLOGY_V2 and UEFI boot. + Generation2Features []Generation2FeaturesParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"` + + // Defines the first known hardware generation and its features, which are: + LegacyFeatures []LegacyFeaturesObservation `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"` +} + +type HardwareGenerationParameters struct { + + // A newer hardware generation, which always uses PCI_TOPOLOGY_V2 and UEFI boot. + // +kubebuilder:validation:Optional + Generation2Features []Generation2FeaturesParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"` + + // Defines the first known hardware generation and its features, which are: + // +kubebuilder:validation:Optional + LegacyFeatures []LegacyFeaturesParameters `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"` +} + +type LegacyFeaturesInitParameters struct { + + // A variant of PCI topology, one of PCI_TOPOLOGY_V1 or PCI_TOPOLOGY_V2. + PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"` +} + +type LegacyFeaturesObservation struct { + + // A variant of PCI topology, one of PCI_TOPOLOGY_V1 or PCI_TOPOLOGY_V2. + PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"` +} + +type LegacyFeaturesParameters struct { + + // A variant of PCI topology, one of PCI_TOPOLOGY_V1 or PCI_TOPOLOGY_V2. + // +kubebuilder:validation:Optional + PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"` +} + +// DiskSpec defines the desired state of Disk +type DiskSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider DiskInitParameters `json:"initProvider,omitempty"` +} + +// DiskStatus defines the observed state of Disk. +type DiskStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Disk is the Schema for the Disks API. Persistent disks are durable storage devices that function similarly to the physical disks in a desktop or a server.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Disk struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DiskSpec `json:"spec"` + Status DiskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskList contains a list of Disks +type DiskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Disk `json:"items"` +} + +// Repository type metadata. +var ( + Disk_Kind = "Disk" + Disk_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Disk_Kind}.String() + Disk_KindAPIVersion = Disk_Kind + "." + CRDGroupVersion.String() + Disk_GroupVersionKind = CRDGroupVersion.WithKind(Disk_Kind) +) + +func init() { + SchemeBuilder.Register(&Disk{}, &DiskList{}) +} diff --git a/apis/compute/v1alpha1/zz_diskiambinding_terraformed.go b/apis/compute/v1alpha1/zz_diskiambinding_terraformed.go new file mode 100755 index 0000000..bf9f07d --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DiskIAMBinding +func (mg *DiskIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_disk_iam_binding" +} + +// GetConnectionDetailsMapping for this DiskIAMBinding +func (tr *DiskIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DiskIAMBinding +func (tr *DiskIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DiskIAMBinding +func (tr *DiskIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DiskIAMBinding +func (tr *DiskIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DiskIAMBinding +func (tr *DiskIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DiskIAMBinding +func (tr *DiskIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DiskIAMBinding +func (tr *DiskIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this DiskIAMBinding +func (tr *DiskIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false; we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DiskIAMBinding using its observed tfState. +// Returns true if there are any spec changes for the resource. +func (tr *DiskIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &DiskIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DiskIAMBinding) GetTerraformSchemaVersion() int { + return 0 +}
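(Editor's aside, not part of the generated diff: a hedged sketch of populating the DiskIAMBinding type defined in the next file. The disk ID, role name, and the serviceAccount: member prefix are assumptions for illustration, not values confirmed by this diff.)

package main

import (
	"fmt"

	computev1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
)

// ptr is a small helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	binding := &computev1alpha1.DiskIAMBinding{}
	binding.Spec.ForProvider = computev1alpha1.DiskIAMBindingParameters{
		DiskID:  ptr("fhm0example"),                            // hypothetical disk ID
		Role:    ptr("compute.viewer"),                         // assumed role name
		Members: []*string{ptr("serviceAccount:ajeexamplesa")}, // assumed member format
	}
	fmt.Println(*binding.Spec.ForProvider.Role, len(binding.Spec.ForProvider.Members))
}

diff --git a/apis/compute/v1alpha1/zz_diskiambinding_types.go b/apis/compute/v1alpha1/zz_diskiambinding_types.go new file mode 100755 index 0000000..65a86d8 --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskiambinding_types.go @@ -0,0 +1,118 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskIAMBindingInitParameters struct { + + // ID of the disk to attach the policy to. + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_disk_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskIAMBindingObservation struct { + + // ID of the disk to attach the policy to. + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned.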
Only one yandex_compute_disk_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskIAMBindingParameters struct { + + // ID of the disk to attach the policy to. + // +kubebuilder:validation:Optional + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_disk_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// DiskIAMBindingSpec defines the desired state of DiskIAMBinding +type DiskIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider DiskIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// DiskIAMBindingStatus defines the observed state of DiskIAMBinding. +type DiskIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DiskIAMBinding is the Schema for the DiskIAMBindings API. Allows management of a single IAM binding for a Disk.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DiskIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.diskId) || (has(self.initProvider) && has(self.initProvider.diskId))",message="spec.forProvider.diskId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec DiskIAMBindingSpec `json:"spec"` + Status DiskIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskIAMBindingList contains a list of DiskIAMBindings +type DiskIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DiskIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + DiskIAMBinding_Kind = "DiskIAMBinding" + DiskIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DiskIAMBinding_Kind}.String() + DiskIAMBinding_KindAPIVersion = DiskIAMBinding_Kind + "." + CRDGroupVersion.String() + DiskIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(DiskIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&DiskIAMBinding{}, &DiskIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_diskplacementgroup_terraformed.go b/apis/compute/v1alpha1/zz_diskplacementgroup_terraformed.go new file mode 100755 index 0000000..ca88b90 --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskplacementgroup_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DiskPlacementGroup +func (mg *DiskPlacementGroup) GetTerraformResourceType() string { + return "yandex_compute_disk_placement_group" +} + +// GetConnectionDetailsMapping for this DiskPlacementGroup +func (tr *DiskPlacementGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DiskPlacementGroup +func (tr *DiskPlacementGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DiskPlacementGroup +func (tr *DiskPlacementGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DiskPlacementGroup +func (tr *DiskPlacementGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DiskPlacementGroup +func (tr *DiskPlacementGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DiskPlacementGroup +func (tr *DiskPlacementGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DiskPlacementGroup +func (tr *DiskPlacementGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DiskPlacementGroup +func (tr *DiskPlacementGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DiskPlacementGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
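The accessor methods above all follow one pattern: serialize the typed *Parameters/*Observation struct and re-read it as a map[string]any for the Terraform layer (or the reverse, for the setters). Before the LateInitialize implementation below, here is the shape of that round trip using plain encoding/json as a stand-in for upjet's Terraform-aware json.TFParser; the struct and its fields are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative stand-in for a generated Parameters struct.
type params struct {
	Name *string `json:"name,omitempty"`
	Size *int    `json:"size,omitempty"`
}

// toMap mirrors GetParameters: marshal the typed struct, then unmarshal
// into the generic map the Terraform workspace consumes.
func toMap(p params) (map[string]any, error) {
	raw, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.Unmarshal(raw, &base)
}

func main() {
	name := "demo"
	m, err := toMap(params{Name: &name})
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // map[name:demo] — nil fields drop out via omitempty
}
```

The omitempty tags are what make this safe: unset pointer fields never reach Terraform as explicit nulls, they simply disappear from the map.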
+func (tr *DiskPlacementGroup) LateInitialize(attrs []byte) (bool, error) { + params := &DiskPlacementGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DiskPlacementGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_diskplacementgroup_types.go b/apis/compute/v1alpha1/zz_diskplacementgroup_types.go new file mode 100755 index 0000000..6017ae4 --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskplacementgroup_types.go @@ -0,0 +1,157 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskPlacementGroupInitParameters struct { + + // A description of the Disk Placement Group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Disk Placement Group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the Disk Placement Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the zone where the Disk Placement Group resides. Default is ru-central1-b + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type DiskPlacementGroupObservation struct { + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // A description of the Disk Placement Group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Disk Placement Group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the Disk Placement Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Status of the Disk Placement Group. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // ID of the zone where the Disk Placement Group resides. Default is ru-central1-b + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type DiskPlacementGroupParameters struct { + + // A description of the Disk Placement Group. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Disk Placement Group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the Disk Placement Group. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the zone where the Disk Placement Group resides. Default is ru-central1-b + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// DiskPlacementGroupSpec defines the desired state of DiskPlacementGroup +type DiskPlacementGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskPlacementGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DiskPlacementGroupInitParameters `json:"initProvider,omitempty"` +} + +// DiskPlacementGroupStatus defines the observed state of DiskPlacementGroup. +type DiskPlacementGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskPlacementGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DiskPlacementGroup is the Schema for the DiskPlacementGroups API. Manages a Disk Placement Group resource. 
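Note that DiskPlacementGroupParameters above carries three ways to point at the parent folder: a literal folderId, a folderIdRef naming another managed Folder resource, or a folderIdSelector that matches by labels at reconcile time. A small sketch of how the two indirect forms are populated, using the crossplane-runtime types this file already imports (the name "my-folder" and the labels are made up):

```go
package main

import (
	"fmt"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// Pin the folder by the referenced managed resource's object name;
	// the controller resolves it to folder_id and writes it back to spec.
	ref := &v1.Reference{Name: "my-folder"}

	// Or let the controller pick any Folder matching these labels;
	// resolution happens on every reconcile until an ID is found.
	sel := &v1.Selector{MatchLabels: map[string]string{"env": "prod"}}

	fmt.Println(ref.Name, sel.MatchLabels["env"])
}
```

The `tf:"-"` tags on both fields keep them out of the Terraform payload entirely; only the resolved folderId ever crosses into the provider.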
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DiskPlacementGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DiskPlacementGroupSpec `json:"spec"` + Status DiskPlacementGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskPlacementGroupList contains a list of DiskPlacementGroups +type DiskPlacementGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DiskPlacementGroup `json:"items"` +} + +// Repository type metadata. +var ( + DiskPlacementGroup_Kind = "DiskPlacementGroup" + DiskPlacementGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DiskPlacementGroup_Kind}.String() + DiskPlacementGroup_KindAPIVersion = DiskPlacementGroup_Kind + "." + CRDGroupVersion.String() + DiskPlacementGroup_GroupVersionKind = CRDGroupVersion.WithKind(DiskPlacementGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&DiskPlacementGroup{}, &DiskPlacementGroupList{}) +} diff --git a/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_terraformed.go b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_terraformed.go new file mode 100755 index 0000000..395748b --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DiskPlacementGroupIAMBinding +func (mg *DiskPlacementGroupIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_disk_placement_group_iam_binding" +} + +// GetConnectionDetailsMapping for this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DiskPlacementGroupIAMBinding using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *DiskPlacementGroupIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &DiskPlacementGroupIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DiskPlacementGroupIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_types.go b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_types.go new file mode 100755 index 0000000..898704f --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_types.go @@ -0,0 +1,118 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskPlacementGroupIAMBindingInitParameters struct { + + // ID of the disk placement group to attach the policy to. + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_disk_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskPlacementGroupIAMBindingObservation struct { + + // ID of the disk placement group to attach the policy to. + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_disk_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskPlacementGroupIAMBindingParameters struct { + + // ID of the disk placement group to attach the policy to. + // +kubebuilder:validation:Optional + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_disk_placement_group_iam_binding can be used per role. 
+ // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// DiskPlacementGroupIAMBindingSpec defines the desired state of DiskPlacementGroupIAMBinding +type DiskPlacementGroupIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskPlacementGroupIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DiskPlacementGroupIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// DiskPlacementGroupIAMBindingStatus defines the observed state of DiskPlacementGroupIAMBinding. +type DiskPlacementGroupIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskPlacementGroupIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DiskPlacementGroupIAMBinding is the Schema for the DiskPlacementGroupIAMBindings API. Allows management of a single IAM binding for a Disk Placement Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DiskPlacementGroupIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.diskPlacementGroupId) || (has(self.initProvider) && has(self.initProvider.diskPlacementGroupId))",message="spec.forProvider.diskPlacementGroupId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec DiskPlacementGroupIAMBindingSpec `json:"spec"` + Status DiskPlacementGroupIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskPlacementGroupIAMBindingList contains a list of DiskPlacementGroupIAMBindings +type 
DiskPlacementGroupIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DiskPlacementGroupIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + DiskPlacementGroupIAMBinding_Kind = "DiskPlacementGroupIAMBinding" + DiskPlacementGroupIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DiskPlacementGroupIAMBinding_Kind}.String() + DiskPlacementGroupIAMBinding_KindAPIVersion = DiskPlacementGroupIAMBinding_Kind + "." + CRDGroupVersion.String() + DiskPlacementGroupIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(DiskPlacementGroupIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&DiskPlacementGroupIAMBinding{}, &DiskPlacementGroupIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_filesystem_terraformed.go b/apis/compute/v1alpha1/zz_filesystem_terraformed.go new file mode 100755 index 0000000..e1fe7d2 --- /dev/null +++ b/apis/compute/v1alpha1/zz_filesystem_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Filesystem +func (mg *Filesystem) GetTerraformResourceType() string { + return "yandex_compute_filesystem" +} + +// GetConnectionDetailsMapping for this Filesystem +func (tr *Filesystem) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Filesystem +func (tr *Filesystem) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Filesystem +func (tr *Filesystem) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Filesystem +func (tr *Filesystem) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Filesystem +func (tr *Filesystem) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Filesystem +func (tr *Filesystem) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Filesystem +func (tr *Filesystem) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Filesystem +func (tr *Filesystem) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init 
parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Filesystem using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Filesystem) LateInitialize(attrs []byte) (bool, error) { + params := &FilesystemParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Filesystem) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_filesystem_types.go b/apis/compute/v1alpha1/zz_filesystem_types.go new file mode 100755 index 0000000..48e3eee --- /dev/null +++ b/apis/compute/v1alpha1/zz_filesystem_types.go @@ -0,0 +1,189 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FilesystemInitParameters struct { + + // Block size of the filesystem, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the filesystem. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the filesystem belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to assign to this filesystem. A list of key/value pairs. For details about the concept, see documentation. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the filesystem. Provide this property when you create a resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the filesystem, specified in GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // Type of filesystem to create. Type network-hdd is set by default. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Availability zone where the filesystem will reside. 
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type FilesystemObservation struct { + + // Block size of the filesystem, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Creation timestamp of the filesystem. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the filesystem. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the filesystem belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this filesystem. A list of key/value pairs. For details about the concept, see documentation. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the filesystem. Provide this property when you create a resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the filesystem, specified in GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The status of the filesystem. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Type of filesystem to create. Type network-hdd is set by default. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Availability zone where the filesystem will reside. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type FilesystemParameters struct { + + // Block size of the filesystem, specified in bytes. + // +kubebuilder:validation:Optional + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the filesystem. Provide this property when you create a resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the filesystem belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to assign to this filesystem. A list of key/value pairs. For details about the concept, see documentation. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the filesystem. Provide this property when you create a resource. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the filesystem, specified in GB. + // +kubebuilder:validation:Optional + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // Type of filesystem to create. Type network-hdd is set by default. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Availability zone where the filesystem will reside. 
+ // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// FilesystemSpec defines the desired state of Filesystem +type FilesystemSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FilesystemParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FilesystemInitParameters `json:"initProvider,omitempty"` +} + +// FilesystemStatus defines the observed state of Filesystem. +type FilesystemStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FilesystemObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Filesystem is the Schema for the Filesystems API. File storage is a virtual file system that can be attached to multiple Compute Cloud VMs in the same availability zone. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Filesystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FilesystemSpec `json:"spec"` + Status FilesystemStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FilesystemList contains a list of Filesystems +type FilesystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Filesystem `json:"items"` +} + +// Repository type metadata. +var ( + Filesystem_Kind = "Filesystem" + Filesystem_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Filesystem_Kind}.String() + Filesystem_KindAPIVersion = Filesystem_Kind + "." + CRDGroupVersion.String() + Filesystem_GroupVersionKind = CRDGroupVersion.WithKind(Filesystem_Kind) +) + +func init() { + SchemeBuilder.Register(&Filesystem{}, &FilesystemList{}) +} diff --git a/apis/compute/v1alpha1/zz_filesystemiambinding_terraformed.go b/apis/compute/v1alpha1/zz_filesystemiambinding_terraformed.go new file mode 100755 index 0000000..afc7bb9 --- /dev/null +++ b/apis/compute/v1alpha1/zz_filesystemiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FilesystemIAMBinding +func (mg *FilesystemIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_filesystem_iam_binding" +} + +// GetConnectionDetailsMapping for this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FilesystemIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. 
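The function below delegates to upjet's GenericLateInitializer, which walks spec.forProvider with reflection and copies in values from the observed Terraform state wherever the user left a field unset, returning whether anything changed. Per field, it is conceptually doing something like this hand-rolled sketch (a single illustrative field, not the real generic machinery):

```go
package main

import "fmt"

// Minimal stand-in for a generated Parameters struct.
type spec struct{ Zone *string }

// lateInit fills Zone from the observed state only when the user left
// it unset, and reports whether the spec changed as a result.
func lateInit(s *spec, observedZone *string) bool {
	if s.Zone == nil && observedZone != nil {
		z := *observedZone
		s.Zone = &z
		return true // spec changed; the controller persists the update
	}
	return false
}

func main() {
	observed := "ru-central1-b"
	s := spec{}
	fmt.Println(lateInit(&s, &observed), *s.Zone) // true ru-central1-b
}
```

The WithZeroValueJSONOmitEmptyFilter option in the generated code additionally skips zero values on omitempty-tagged fields, so late-initialization does not pin empty strings or zeros into the spec.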
+func (tr *FilesystemIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &FilesystemIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FilesystemIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_filesystemiambinding_types.go b/apis/compute/v1alpha1/zz_filesystemiambinding_types.go new file mode 100755 index 0000000..612c656 --- /dev/null +++ b/apis/compute/v1alpha1/zz_filesystemiambinding_types.go @@ -0,0 +1,118 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FilesystemIAMBindingInitParameters struct { + + // ID of the filesystem to attach the policy to. + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_filesystem_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type FilesystemIAMBindingObservation struct { + + // ID of the filesystem to attach the policy to. + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_filesystem_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type FilesystemIAMBindingParameters struct { + + // ID of the filesystem to attach the policy to. + // +kubebuilder:validation:Optional + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_filesystem_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// FilesystemIAMBindingSpec defines the desired state of FilesystemIAMBinding +type FilesystemIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FilesystemIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FilesystemIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// FilesystemIAMBindingStatus defines the observed state of FilesystemIAMBinding. +type FilesystemIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FilesystemIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// FilesystemIAMBinding is the Schema for the FilesystemIAMBindings API. Allows management of a single IAM binding for a Filesystem. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type FilesystemIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.filesystemId) || (has(self.initProvider) && has(self.initProvider.filesystemId))",message="spec.forProvider.filesystemId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec FilesystemIAMBindingSpec `json:"spec"` + Status FilesystemIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FilesystemIAMBindingList contains a list of FilesystemIAMBindings +type FilesystemIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FilesystemIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + FilesystemIAMBinding_Kind = "FilesystemIAMBinding" + FilesystemIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FilesystemIAMBinding_Kind}.String() + FilesystemIAMBinding_KindAPIVersion = FilesystemIAMBinding_Kind + "." 
+ CRDGroupVersion.String() + FilesystemIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(FilesystemIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&FilesystemIAMBinding{}, &FilesystemIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_generated.conversion_hubs.go b/apis/compute/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..5f1200c --- /dev/null +++ b/apis/compute/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,60 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Disk) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DiskIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DiskPlacementGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DiskPlacementGroupIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Filesystem) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FilesystemIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GpuCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GpuClusterIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Image) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ImageIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Instance) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *InstanceGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *InstanceIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PlacementGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PlacementGroupIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Snapshot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SnapshotIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SnapshotSchedule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SnapshotScheduleIAMBinding) Hub() {} diff --git a/apis/compute/v1alpha1/zz_generated.deepcopy.go b/apis/compute/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..bd62306 --- /dev/null +++ b/apis/compute/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,12661 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyInitParameters) DeepCopyInto(out *AllocationPolicyInitParameters) { + *out = *in + if in.InstanceTagsPool != nil { + in, out := &in.InstanceTagsPool, &out.InstanceTagsPool + *out = make([]InstanceTagsPoolInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyInitParameters. 
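The zz_generated.conversion_hubs.go file above marks every kind in this group as a conversion hub: the version that all other served API versions convert through. With only v1alpha1 present, the Hub() methods are inert today. The contract comes from sigs.k8s.io/controller-runtime/pkg/conversion; here is a compressed sketch of how a hypothetical future spoke version would plug into it (the interfaces are simplified and the v1beta1 type is invented for illustration):

```go
package main

import "fmt"

// Simplified forms of controller-runtime's conversion interfaces.
type Hub interface{ Hub() }

type Convertible interface {
	ConvertTo(dst Hub) error
	ConvertFrom(src Hub) error
}

// v1alpha1 Disk is the hub: it only needs the marker method.
type DiskV1alpha1 struct{ Name string }

func (*DiskV1alpha1) Hub() {}

// A hypothetical future v1beta1 spoke converts to and from the hub.
type DiskV1beta1 struct{ DisplayName string }

var _ Convertible = (*DiskV1beta1)(nil)

func (d *DiskV1beta1) ConvertTo(dst Hub) error {
	dst.(*DiskV1alpha1).Name = d.DisplayName
	return nil
}

func (d *DiskV1beta1) ConvertFrom(src Hub) error {
	d.DisplayName = src.(*DiskV1alpha1).Name
	return nil
}

func main() {
	hub := &DiskV1alpha1{}
	spoke := &DiskV1beta1{DisplayName: "disk-a"}
	_ = spoke.ConvertTo(hub)
	fmt.Println(hub.Name) // disk-a — every conversion routes through the hub
}
```

With this shape, N versions need only N-1 conversion implementations instead of pairwise converters between every version.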
+func (in *AllocationPolicyInitParameters) DeepCopy() *AllocationPolicyInitParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyObservation) DeepCopyInto(out *AllocationPolicyObservation) { + *out = *in + if in.InstanceTagsPool != nil { + in, out := &in.InstanceTagsPool, &out.InstanceTagsPool + *out = make([]InstanceTagsPoolObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyObservation. +func (in *AllocationPolicyObservation) DeepCopy() *AllocationPolicyObservation { + if in == nil { + return nil + } + out := new(AllocationPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyParameters) DeepCopyInto(out *AllocationPolicyParameters) { + *out = *in + if in.InstanceTagsPool != nil { + in, out := &in.InstanceTagsPool, &out.InstanceTagsPool + *out = make([]InstanceTagsPoolParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyParameters. +func (in *AllocationPolicyParameters) DeepCopy() *AllocationPolicyParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationLoadBalancerInitParameters) DeepCopyInto(out *ApplicationLoadBalancerInitParameters) { + *out = *in + if in.IgnoreHealthChecks != nil { + in, out := &in.IgnoreHealthChecks, &out.IgnoreHealthChecks + *out = new(bool) + **out = **in + } + if in.MaxOpeningTrafficDuration != nil { + in, out := &in.MaxOpeningTrafficDuration, &out.MaxOpeningTrafficDuration + *out = new(float64) + **out = **in + } + if in.TargetGroupDescription != nil { + in, out := &in.TargetGroupDescription, &out.TargetGroupDescription + *out = new(string) + **out = **in + } + if in.TargetGroupLabels != nil { + in, out := &in.TargetGroupLabels, &out.TargetGroupLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetGroupName != nil { + in, out := &in.TargetGroupName, &out.TargetGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLoadBalancerInitParameters. 
+func (in *ApplicationLoadBalancerInitParameters) DeepCopy() *ApplicationLoadBalancerInitParameters { + if in == nil { + return nil + } + out := new(ApplicationLoadBalancerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationLoadBalancerObservation) DeepCopyInto(out *ApplicationLoadBalancerObservation) { + *out = *in + if in.IgnoreHealthChecks != nil { + in, out := &in.IgnoreHealthChecks, &out.IgnoreHealthChecks + *out = new(bool) + **out = **in + } + if in.MaxOpeningTrafficDuration != nil { + in, out := &in.MaxOpeningTrafficDuration, &out.MaxOpeningTrafficDuration + *out = new(float64) + **out = **in + } + if in.StatusMessage != nil { + in, out := &in.StatusMessage, &out.StatusMessage + *out = new(string) + **out = **in + } + if in.TargetGroupDescription != nil { + in, out := &in.TargetGroupDescription, &out.TargetGroupDescription + *out = new(string) + **out = **in + } + if in.TargetGroupID != nil { + in, out := &in.TargetGroupID, &out.TargetGroupID + *out = new(string) + **out = **in + } + if in.TargetGroupLabels != nil { + in, out := &in.TargetGroupLabels, &out.TargetGroupLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetGroupName != nil { + in, out := &in.TargetGroupName, &out.TargetGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLoadBalancerObservation. +func (in *ApplicationLoadBalancerObservation) DeepCopy() *ApplicationLoadBalancerObservation { + if in == nil { + return nil + } + out := new(ApplicationLoadBalancerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationLoadBalancerParameters) DeepCopyInto(out *ApplicationLoadBalancerParameters) { + *out = *in + if in.IgnoreHealthChecks != nil { + in, out := &in.IgnoreHealthChecks, &out.IgnoreHealthChecks + *out = new(bool) + **out = **in + } + if in.MaxOpeningTrafficDuration != nil { + in, out := &in.MaxOpeningTrafficDuration, &out.MaxOpeningTrafficDuration + *out = new(float64) + **out = **in + } + if in.TargetGroupDescription != nil { + in, out := &in.TargetGroupDescription, &out.TargetGroupDescription + *out = new(string) + **out = **in + } + if in.TargetGroupLabels != nil { + in, out := &in.TargetGroupLabels, &out.TargetGroupLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetGroupName != nil { + in, out := &in.TargetGroupName, &out.TargetGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLoadBalancerParameters. 
+func (in *ApplicationLoadBalancerParameters) DeepCopy() *ApplicationLoadBalancerParameters { + if in == nil { + return nil + } + out := new(ApplicationLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleInitParameters) DeepCopyInto(out *AutoScaleInitParameters) { + *out = *in + if in.AutoScaleType != nil { + in, out := &in.AutoScaleType, &out.AutoScaleType + *out = new(string) + **out = **in + } + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(float64) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]CustomRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitialSize != nil { + in, out := &in.InitialSize, &out.InitialSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(float64) + **out = **in + } + if in.MinZoneSize != nil { + in, out := &in.MinZoneSize, &out.MinZoneSize + *out = new(float64) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(float64) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleInitParameters. +func (in *AutoScaleInitParameters) DeepCopy() *AutoScaleInitParameters { + if in == nil { + return nil + } + out := new(AutoScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleObservation) DeepCopyInto(out *AutoScaleObservation) { + *out = *in + if in.AutoScaleType != nil { + in, out := &in.AutoScaleType, &out.AutoScaleType + *out = new(string) + **out = **in + } + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(float64) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]CustomRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitialSize != nil { + in, out := &in.InitialSize, &out.InitialSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(float64) + **out = **in + } + if in.MinZoneSize != nil { + in, out := &in.MinZoneSize, &out.MinZoneSize + *out = new(float64) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(float64) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleObservation. 
+func (in *AutoScaleObservation) DeepCopy() *AutoScaleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AutoScaleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AutoScaleParameters) DeepCopyInto(out *AutoScaleParameters) {
+	*out = *in
+	if in.AutoScaleType != nil {
+		in, out := &in.AutoScaleType, &out.AutoScaleType
+		*out = new(string)
+		**out = **in
+	}
+	if in.CPUUtilizationTarget != nil {
+		in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget
+		*out = new(float64)
+		**out = **in
+	}
+	if in.CustomRule != nil {
+		in, out := &in.CustomRule, &out.CustomRule
+		*out = make([]CustomRuleParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitialSize != nil {
+		in, out := &in.InitialSize, &out.InitialSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxSize != nil {
+		in, out := &in.MaxSize, &out.MaxSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MeasurementDuration != nil {
+		in, out := &in.MeasurementDuration, &out.MeasurementDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinZoneSize != nil {
+		in, out := &in.MinZoneSize, &out.MinZoneSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StabilizationDuration != nil {
+		in, out := &in.StabilizationDuration, &out.StabilizationDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.WarmupDuration != nil {
+		in, out := &in.WarmupDuration, &out.WarmupDuration
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleParameters.
+func (in *AutoScaleParameters) DeepCopy() *AutoScaleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AutoScaleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BootDiskInitParameters) DeepCopyInto(out *BootDiskInitParameters) {
+	*out = *in
+	if in.AutoDelete != nil {
+		in, out := &in.AutoDelete, &out.AutoDelete
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskIDRef != nil {
+		in, out := &in.DiskIDRef, &out.DiskIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DiskIDSelector != nil {
+		in, out := &in.DiskIDSelector, &out.DiskIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]InitializeParamsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskInitParameters.
+func (in *BootDiskInitParameters) DeepCopy() *BootDiskInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(BootDiskInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BootDiskInitializeParamsInitParameters) DeepCopyInto(out *BootDiskInitializeParamsInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageIDRef != nil {
+		in, out := &in.ImageIDRef, &out.ImageIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageIDSelector != nil {
+		in, out := &in.ImageIDSelector, &out.ImageIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskInitializeParamsInitParameters.
+func (in *BootDiskInitializeParamsInitParameters) DeepCopy() *BootDiskInitializeParamsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(BootDiskInitializeParamsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BootDiskInitializeParamsObservation) DeepCopyInto(out *BootDiskInitializeParamsObservation) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskInitializeParamsObservation.
+func (in *BootDiskInitializeParamsObservation) DeepCopy() *BootDiskInitializeParamsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(BootDiskInitializeParamsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BootDiskInitializeParamsParameters) DeepCopyInto(out *BootDiskInitializeParamsParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageIDRef != nil {
+		in, out := &in.ImageIDRef, &out.ImageIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageIDSelector != nil {
+		in, out := &in.ImageIDSelector, &out.ImageIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskInitializeParamsParameters.
+func (in *BootDiskInitializeParamsParameters) DeepCopy() *BootDiskInitializeParamsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(BootDiskInitializeParamsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BootDiskObservation) DeepCopyInto(out *BootDiskObservation) {
+	*out = *in
+	if in.AutoDelete != nil {
+		in, out := &in.AutoDelete, &out.AutoDelete
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]InitializeParamsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskObservation.
+func (in *BootDiskObservation) DeepCopy() *BootDiskObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(BootDiskObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BootDiskParameters) DeepCopyInto(out *BootDiskParameters) {
+	*out = *in
+	if in.AutoDelete != nil {
+		in, out := &in.AutoDelete, &out.AutoDelete
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskIDRef != nil {
+		in, out := &in.DiskIDRef, &out.DiskIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DiskIDSelector != nil {
+		in, out := &in.DiskIDSelector, &out.DiskIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]InitializeParamsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskParameters.
+func (in *BootDiskParameters) DeepCopy() *BootDiskParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(BootDiskParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomRuleInitParameters) DeepCopyInto(out *CustomRuleInitParameters) {
+	*out = *in
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetricName != nil {
+		in, out := &in.MetricName, &out.MetricName
+		*out = new(string)
+		**out = **in
+	}
+	if in.MetricType != nil {
+		in, out := &in.MetricType, &out.MetricType
+		*out = new(string)
+		**out = **in
+	}
+	if in.RuleType != nil {
+		in, out := &in.RuleType, &out.RuleType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(string)
+		**out = **in
+	}
+	if in.Target != nil {
+		in, out := &in.Target, &out.Target
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRuleInitParameters.
+func (in *CustomRuleInitParameters) DeepCopy() *CustomRuleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomRuleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomRuleObservation) DeepCopyInto(out *CustomRuleObservation) {
+	*out = *in
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetricName != nil {
+		in, out := &in.MetricName, &out.MetricName
+		*out = new(string)
+		**out = **in
+	}
+	if in.MetricType != nil {
+		in, out := &in.MetricType, &out.MetricType
+		*out = new(string)
+		**out = **in
+	}
+	if in.RuleType != nil {
+		in, out := &in.RuleType, &out.RuleType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(string)
+		**out = **in
+	}
+	if in.Target != nil {
+		in, out := &in.Target, &out.Target
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRuleObservation.
+func (in *CustomRuleObservation) DeepCopy() *CustomRuleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomRuleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomRuleParameters) DeepCopyInto(out *CustomRuleParameters) {
+	*out = *in
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetricName != nil {
+		in, out := &in.MetricName, &out.MetricName
+		*out = new(string)
+		**out = **in
+	}
+	if in.MetricType != nil {
+		in, out := &in.MetricType, &out.MetricType
+		*out = new(string)
+		**out = **in
+	}
+	if in.RuleType != nil {
+		in, out := &in.RuleType, &out.RuleType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(string)
+		**out = **in
+	}
+	if in.Target != nil {
+		in, out := &in.Target, &out.Target
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRuleParameters.
+func (in *CustomRuleParameters) DeepCopy() *CustomRuleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomRuleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSRecordInitParameters) DeepCopyInto(out *DNSRecordInitParameters) {
+	*out = *in
+	if in.DNSZoneID != nil {
+		in, out := &in.DNSZoneID, &out.DNSZoneID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ptr != nil {
+		in, out := &in.Ptr, &out.Ptr
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TTL != nil {
+		in, out := &in.TTL, &out.TTL
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordInitParameters.
+func (in *DNSRecordInitParameters) DeepCopy() *DNSRecordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSRecordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSRecordObservation) DeepCopyInto(out *DNSRecordObservation) {
+	*out = *in
+	if in.DNSZoneID != nil {
+		in, out := &in.DNSZoneID, &out.DNSZoneID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ptr != nil {
+		in, out := &in.Ptr, &out.Ptr
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TTL != nil {
+		in, out := &in.TTL, &out.TTL
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordObservation.
+func (in *DNSRecordObservation) DeepCopy() *DNSRecordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSRecordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSRecordParameters) DeepCopyInto(out *DNSRecordParameters) {
+	*out = *in
+	if in.DNSZoneID != nil {
+		in, out := &in.DNSZoneID, &out.DNSZoneID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ptr != nil {
+		in, out := &in.Ptr, &out.Ptr
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TTL != nil {
+		in, out := &in.TTL, &out.TTL
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordParameters.
+func (in *DNSRecordParameters) DeepCopy() *DNSRecordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSRecordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeployPolicyInitParameters) DeepCopyInto(out *DeployPolicyInitParameters) {
+	*out = *in
+	if in.MaxCreating != nil {
+		in, out := &in.MaxCreating, &out.MaxCreating
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxDeleting != nil {
+		in, out := &in.MaxDeleting, &out.MaxDeleting
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxExpansion != nil {
+		in, out := &in.MaxExpansion, &out.MaxExpansion
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StartupDuration != nil {
+		in, out := &in.StartupDuration, &out.StartupDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Strategy != nil {
+		in, out := &in.Strategy, &out.Strategy
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployPolicyInitParameters.
+func (in *DeployPolicyInitParameters) DeepCopy() *DeployPolicyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DeployPolicyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeployPolicyObservation) DeepCopyInto(out *DeployPolicyObservation) {
+	*out = *in
+	if in.MaxCreating != nil {
+		in, out := &in.MaxCreating, &out.MaxCreating
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxDeleting != nil {
+		in, out := &in.MaxDeleting, &out.MaxDeleting
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxExpansion != nil {
+		in, out := &in.MaxExpansion, &out.MaxExpansion
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StartupDuration != nil {
+		in, out := &in.StartupDuration, &out.StartupDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Strategy != nil {
+		in, out := &in.Strategy, &out.Strategy
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployPolicyObservation.
+func (in *DeployPolicyObservation) DeepCopy() *DeployPolicyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DeployPolicyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeployPolicyParameters) DeepCopyInto(out *DeployPolicyParameters) {
+	*out = *in
+	if in.MaxCreating != nil {
+		in, out := &in.MaxCreating, &out.MaxCreating
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxDeleting != nil {
+		in, out := &in.MaxDeleting, &out.MaxDeleting
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxExpansion != nil {
+		in, out := &in.MaxExpansion, &out.MaxExpansion
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StartupDuration != nil {
+		in, out := &in.StartupDuration, &out.StartupDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Strategy != nil {
+		in, out := &in.Strategy, &out.Strategy
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployPolicyParameters.
+func (in *DeployPolicyParameters) DeepCopy() *DeployPolicyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DeployPolicyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Disk) DeepCopyInto(out *Disk) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disk.
+func (in *Disk) DeepCopy() *Disk {
+	if in == nil {
+		return nil
+	}
+	out := new(Disk)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Disk) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBinding) DeepCopyInto(out *DiskIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBinding.
+func (in *DiskIAMBinding) DeepCopy() *DiskIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBindingInitParameters) DeepCopyInto(out *DiskIAMBindingInitParameters) {
+	*out = *in
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingInitParameters.
+func (in *DiskIAMBindingInitParameters) DeepCopy() *DiskIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBindingList) DeepCopyInto(out *DiskIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DiskIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingList.
+func (in *DiskIAMBindingList) DeepCopy() *DiskIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBindingObservation) DeepCopyInto(out *DiskIAMBindingObservation) {
+	*out = *in
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingObservation.
+func (in *DiskIAMBindingObservation) DeepCopy() *DiskIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBindingParameters) DeepCopyInto(out *DiskIAMBindingParameters) {
+	*out = *in
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingParameters.
+func (in *DiskIAMBindingParameters) DeepCopy() *DiskIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBindingSpec) DeepCopyInto(out *DiskIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingSpec.
+func (in *DiskIAMBindingSpec) DeepCopy() *DiskIAMBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskIAMBindingStatus) DeepCopyInto(out *DiskIAMBindingStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingStatus.
+func (in *DiskIAMBindingStatus) DeepCopy() *DiskIAMBindingStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskIAMBindingStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskInitParameters) DeepCopyInto(out *DiskInitParameters) {
+	*out = *in
+	if in.AllowRecreate != nil {
+		in, out := &in.AllowRecreate, &out.AllowRecreate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskPlacementPolicy != nil {
+		in, out := &in.DiskPlacementPolicy, &out.DiskPlacementPolicy
+		*out = make([]DiskPlacementPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]HardwareGenerationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageIDRef != nil {
+		in, out := &in.ImageIDRef, &out.ImageIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageIDSelector != nil {
+		in, out := &in.ImageIDSelector, &out.ImageIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskInitParameters.
+func (in *DiskInitParameters) DeepCopy() *DiskInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskList) DeepCopyInto(out *DiskList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Disk, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskList.
+func (in *DiskList) DeepCopy() *DiskList {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskObservation) DeepCopyInto(out *DiskObservation) {
+	*out = *in
+	if in.AllowRecreate != nil {
+		in, out := &in.AllowRecreate, &out.AllowRecreate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskPlacementPolicy != nil {
+		in, out := &in.DiskPlacementPolicy, &out.DiskPlacementPolicy
+		*out = make([]DiskPlacementPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]HardwareGenerationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ProductIds != nil {
+		in, out := &in.ProductIds, &out.ProductIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskObservation.
+func (in *DiskObservation) DeepCopy() *DiskObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskParameters) DeepCopyInto(out *DiskParameters) {
+	*out = *in
+	if in.AllowRecreate != nil {
+		in, out := &in.AllowRecreate, &out.AllowRecreate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskPlacementPolicy != nil {
+		in, out := &in.DiskPlacementPolicy, &out.DiskPlacementPolicy
+		*out = make([]DiskPlacementPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]HardwareGenerationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageIDRef != nil {
+		in, out := &in.ImageIDRef, &out.ImageIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageIDSelector != nil {
+		in, out := &in.ImageIDSelector, &out.ImageIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskParameters.
+func (in *DiskParameters) DeepCopy() *DiskParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroup) DeepCopyInto(out *DiskPlacementGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroup.
+func (in *DiskPlacementGroup) DeepCopy() *DiskPlacementGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskPlacementGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBinding) DeepCopyInto(out *DiskPlacementGroupIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBinding.
+func (in *DiskPlacementGroupIAMBinding) DeepCopy() *DiskPlacementGroupIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskPlacementGroupIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBindingInitParameters) DeepCopyInto(out *DiskPlacementGroupIAMBindingInitParameters) {
+	*out = *in
+	if in.DiskPlacementGroupID != nil {
+		in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingInitParameters.
+func (in *DiskPlacementGroupIAMBindingInitParameters) DeepCopy() *DiskPlacementGroupIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBindingList) DeepCopyInto(out *DiskPlacementGroupIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DiskPlacementGroupIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingList.
+func (in *DiskPlacementGroupIAMBindingList) DeepCopy() *DiskPlacementGroupIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskPlacementGroupIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBindingObservation) DeepCopyInto(out *DiskPlacementGroupIAMBindingObservation) {
+	*out = *in
+	if in.DiskPlacementGroupID != nil {
+		in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingObservation.
+func (in *DiskPlacementGroupIAMBindingObservation) DeepCopy() *DiskPlacementGroupIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBindingParameters) DeepCopyInto(out *DiskPlacementGroupIAMBindingParameters) {
+	*out = *in
+	if in.DiskPlacementGroupID != nil {
+		in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingParameters.
+func (in *DiskPlacementGroupIAMBindingParameters) DeepCopy() *DiskPlacementGroupIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBindingSpec) DeepCopyInto(out *DiskPlacementGroupIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingSpec.
+func (in *DiskPlacementGroupIAMBindingSpec) DeepCopy() *DiskPlacementGroupIAMBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupIAMBindingStatus) DeepCopyInto(out *DiskPlacementGroupIAMBindingStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingStatus.
+func (in *DiskPlacementGroupIAMBindingStatus) DeepCopy() *DiskPlacementGroupIAMBindingStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupIAMBindingStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupInitParameters) DeepCopyInto(out *DiskPlacementGroupInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupInitParameters.
+func (in *DiskPlacementGroupInitParameters) DeepCopy() *DiskPlacementGroupInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupList) DeepCopyInto(out *DiskPlacementGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DiskPlacementGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupList.
+func (in *DiskPlacementGroupList) DeepCopy() *DiskPlacementGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DiskPlacementGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupObservation) DeepCopyInto(out *DiskPlacementGroupObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupObservation.
+func (in *DiskPlacementGroupObservation) DeepCopy() *DiskPlacementGroupObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupParameters) DeepCopyInto(out *DiskPlacementGroupParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupParameters.
+func (in *DiskPlacementGroupParameters) DeepCopy() *DiskPlacementGroupParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupSpec) DeepCopyInto(out *DiskPlacementGroupSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupSpec.
+func (in *DiskPlacementGroupSpec) DeepCopy() *DiskPlacementGroupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementGroupStatus) DeepCopyInto(out *DiskPlacementGroupStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupStatus.
+func (in *DiskPlacementGroupStatus) DeepCopy() *DiskPlacementGroupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementGroupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementPolicyInitParameters) DeepCopyInto(out *DiskPlacementPolicyInitParameters) {
+	*out = *in
+	if in.DiskPlacementGroupID != nil {
+		in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskPlacementGroupIDRef != nil {
+		in, out := &in.DiskPlacementGroupIDRef, &out.DiskPlacementGroupIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DiskPlacementGroupIDSelector != nil {
+		in, out := &in.DiskPlacementGroupIDSelector, &out.DiskPlacementGroupIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementPolicyInitParameters.
+func (in *DiskPlacementPolicyInitParameters) DeepCopy() *DiskPlacementPolicyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementPolicyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementPolicyObservation) DeepCopyInto(out *DiskPlacementPolicyObservation) {
+	*out = *in
+	if in.DiskPlacementGroupID != nil {
+		in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementPolicyObservation.
+func (in *DiskPlacementPolicyObservation) DeepCopy() *DiskPlacementPolicyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementPolicyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskPlacementPolicyParameters) DeepCopyInto(out *DiskPlacementPolicyParameters) {
+	*out = *in
+	if in.DiskPlacementGroupID != nil {
+		in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskPlacementGroupIDRef != nil {
+		in, out := &in.DiskPlacementGroupIDRef, &out.DiskPlacementGroupIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DiskPlacementGroupIDSelector != nil {
+		in, out := &in.DiskPlacementGroupIDSelector, &out.DiskPlacementGroupIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementPolicyParameters.
+func (in *DiskPlacementPolicyParameters) DeepCopy() *DiskPlacementPolicyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskPlacementPolicyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskSpec) DeepCopyInto(out *DiskSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSpec.
+func (in *DiskSpec) DeepCopy() *DiskSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskStatus) DeepCopyInto(out *DiskStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskStatus.
+func (in *DiskStatus) DeepCopy() *DiskStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filesystem) DeepCopyInto(out *Filesystem) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filesystem.
+func (in *Filesystem) DeepCopy() *Filesystem {
+	if in == nil {
+		return nil
+	}
+	out := new(Filesystem)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Filesystem) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBinding) DeepCopyInto(out *FilesystemIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBinding.
+func (in *FilesystemIAMBinding) DeepCopy() *FilesystemIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FilesystemIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBindingInitParameters) DeepCopyInto(out *FilesystemIAMBindingInitParameters) {
+	*out = *in
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingInitParameters.
+func (in *FilesystemIAMBindingInitParameters) DeepCopy() *FilesystemIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBindingList) DeepCopyInto(out *FilesystemIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]FilesystemIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingList.
+func (in *FilesystemIAMBindingList) DeepCopy() *FilesystemIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FilesystemIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBindingObservation) DeepCopyInto(out *FilesystemIAMBindingObservation) {
+	*out = *in
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingObservation.
+func (in *FilesystemIAMBindingObservation) DeepCopy() *FilesystemIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
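The Members field above shows the []*string variant of the pattern: a plain copy() would duplicate the slice cells but leave every element aliasing the source's strings, so the generated loop allocates a new *string for each non-nil element. A compact sketch of the same behavior; deepCopyStringPtrs is an illustrative name, not generated code:

    package main

    import "fmt"

    // deepCopyStringPtrs mirrors the generated loop for []*string fields
    // such as Members: a new slice, then a freshly allocated pointee per
    // non-nil element, so no *string is shared between source and copy.
    func deepCopyStringPtrs(in []*string) []*string {
    	if in == nil {
    		return nil
    	}
    	out := make([]*string, len(in))
    	for i := range in {
    		if in[i] != nil {
    			v := *in[i]
    			out[i] = &v
    		}
    	}
    	return out
    }

    func main() {
    	a, b := "group:devs", "user:alice"
    	members := []*string{&a, nil, &b}
    	cp := deepCopyStringPtrs(members)
    	*cp[0] = "group:ops"
    	fmt.Println(*members[0], *cp[0]) // group:devs group:ops
    }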
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBindingParameters) DeepCopyInto(out *FilesystemIAMBindingParameters) {
+	*out = *in
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingParameters.
+func (in *FilesystemIAMBindingParameters) DeepCopy() *FilesystemIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBindingSpec) DeepCopyInto(out *FilesystemIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingSpec.
+func (in *FilesystemIAMBindingSpec) DeepCopy() *FilesystemIAMBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemIAMBindingStatus) DeepCopyInto(out *FilesystemIAMBindingStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingStatus.
+func (in *FilesystemIAMBindingStatus) DeepCopy() *FilesystemIAMBindingStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemIAMBindingStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemInitParameters) DeepCopyInto(out *FilesystemInitParameters) {
+	*out = *in
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemInitParameters.
+func (in *FilesystemInitParameters) DeepCopy() *FilesystemInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemList) DeepCopyInto(out *FilesystemList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Filesystem, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemList.
+func (in *FilesystemList) DeepCopy() *FilesystemList {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FilesystemList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
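Labels above adds the map[string]*string variant: a fresh map, an explicit nil check per value, and a newly allocated pointee per non-nil entry. The inVal/outVal temporaries in the generated loop exist so the generator can reuse its in/out shadowing template on locals; a hand-written equivalent is shorter. A minimal sketch, with deepCopyLabels as an illustrative name:

    package main

    import "fmt"

    // deepCopyLabels mirrors the generated Labels handling: a new map and a
    // freshly allocated *string per non-nil value, preserving nil entries.
    func deepCopyLabels(in map[string]*string) map[string]*string {
    	if in == nil {
    		return nil
    	}
    	out := make(map[string]*string, len(in))
    	for key, val := range in {
    		if val == nil {
    			out[key] = nil
    			continue
    		}
    		v := *val
    		out[key] = &v
    	}
    	return out
    }

    func main() {
    	env := "prod"
    	labels := map[string]*string{"env": &env, "tier": nil}
    	cp := deepCopyLabels(labels)
    	*cp["env"] = "stage"
    	fmt.Println(*labels["env"], *cp["env"]) // prod stage
    }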
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemObservation) DeepCopyInto(out *FilesystemObservation) {
+	*out = *in
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemObservation.
+func (in *FilesystemObservation) DeepCopy() *FilesystemObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemParameters) DeepCopyInto(out *FilesystemParameters) {
+	*out = *in
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemParameters.
+func (in *FilesystemParameters) DeepCopy() *FilesystemParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemSpec) DeepCopyInto(out *FilesystemSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSpec.
+func (in *FilesystemSpec) DeepCopy() *FilesystemSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilesystemStatus) DeepCopyInto(out *FilesystemStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemStatus.
+func (in *FilesystemStatus) DeepCopy() *FilesystemStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(FilesystemStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FixedScaleInitParameters) DeepCopyInto(out *FixedScaleInitParameters) {
+	*out = *in
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleInitParameters.
+func (in *FixedScaleInitParameters) DeepCopy() *FixedScaleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FixedScaleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FixedScaleObservation) DeepCopyInto(out *FixedScaleObservation) {
+	*out = *in
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleObservation.
+func (in *FixedScaleObservation) DeepCopy() *FixedScaleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(FixedScaleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FixedScaleParameters) DeepCopyInto(out *FixedScaleParameters) {
+	*out = *in
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleParameters.
+func (in *FixedScaleParameters) DeepCopy() *FixedScaleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FixedScaleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Generation2FeaturesInitParameters) DeepCopyInto(out *Generation2FeaturesInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generation2FeaturesInitParameters.
+func (in *Generation2FeaturesInitParameters) DeepCopy() *Generation2FeaturesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(Generation2FeaturesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Generation2FeaturesObservation) DeepCopyInto(out *Generation2FeaturesObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generation2FeaturesObservation.
+func (in *Generation2FeaturesObservation) DeepCopy() *Generation2FeaturesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(Generation2FeaturesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Generation2FeaturesParameters) DeepCopyInto(out *Generation2FeaturesParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generation2FeaturesParameters.
+func (in *Generation2FeaturesParameters) DeepCopy() *Generation2FeaturesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(Generation2FeaturesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuCluster) DeepCopyInto(out *GpuCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuCluster.
+func (in *GpuCluster) DeepCopy() *GpuCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GpuCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBinding) DeepCopyInto(out *GpuClusterIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBinding.
+func (in *GpuClusterIAMBinding) DeepCopy() *GpuClusterIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GpuClusterIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
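Two things are visible above. First, for field-less structs such as the Generation2Features types, *out = *in is already a complete deep copy, so the generated body is a single assignment. Second, top-level kinds like GpuCluster and GpuClusterIAMBinding gain a third method, DeepCopyObject, beyond the DeepCopy/DeepCopyInto pair: DeepCopyObject() runtime.Object is what satisfies k8s.io/apimachinery's runtime.Object interface, letting these kinds pass through schemes, informers, and caches, whose objects are shared and must be copied before mutation. A usage sketch; the v1alpha1 import path is a placeholder for wherever this generated package actually lives:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/runtime"

    	v1alpha1 "example.com/provider/apis/compute/v1alpha1" // placeholder import path
    )

    func main() {
    	gc := &v1alpha1.GpuCluster{}
    	gc.SetName("from-cache")

    	cp := gc.DeepCopy() // independent copy, safe to mutate
    	cp.SetName("edited")

    	var obj runtime.Object = gc.DeepCopyObject() // the runtime.Object contract
    	fmt.Println(gc.GetName(), cp.GetName(), obj != nil) // from-cache edited true
    }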
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBindingInitParameters) DeepCopyInto(out *GpuClusterIAMBindingInitParameters) {
+	*out = *in
+	if in.GpuClusterID != nil {
+		in, out := &in.GpuClusterID, &out.GpuClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingInitParameters.
+func (in *GpuClusterIAMBindingInitParameters) DeepCopy() *GpuClusterIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBindingList) DeepCopyInto(out *GpuClusterIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]GpuClusterIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingList.
+func (in *GpuClusterIAMBindingList) DeepCopy() *GpuClusterIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GpuClusterIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBindingObservation) DeepCopyInto(out *GpuClusterIAMBindingObservation) {
+	*out = *in
+	if in.GpuClusterID != nil {
+		in, out := &in.GpuClusterID, &out.GpuClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingObservation.
+func (in *GpuClusterIAMBindingObservation) DeepCopy() *GpuClusterIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBindingParameters) DeepCopyInto(out *GpuClusterIAMBindingParameters) {
+	*out = *in
+	if in.GpuClusterID != nil {
+		in, out := &in.GpuClusterID, &out.GpuClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingParameters.
+func (in *GpuClusterIAMBindingParameters) DeepCopy() *GpuClusterIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBindingSpec) DeepCopyInto(out *GpuClusterIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingSpec.
+func (in *GpuClusterIAMBindingSpec) DeepCopy() *GpuClusterIAMBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterIAMBindingStatus) DeepCopyInto(out *GpuClusterIAMBindingStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingStatus.
+func (in *GpuClusterIAMBindingStatus) DeepCopy() *GpuClusterIAMBindingStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterIAMBindingStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterInitParameters) DeepCopyInto(out *GpuClusterInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.InterconnectType != nil {
+		in, out := &in.InterconnectType, &out.InterconnectType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterInitParameters.
+func (in *GpuClusterInitParameters) DeepCopy() *GpuClusterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterList) DeepCopyInto(out *GpuClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]GpuCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterList.
+func (in *GpuClusterList) DeepCopy() *GpuClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GpuClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterObservation) DeepCopyInto(out *GpuClusterObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InterconnectType != nil {
+		in, out := &in.InterconnectType, &out.InterconnectType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterObservation.
+func (in *GpuClusterObservation) DeepCopy() *GpuClusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterParameters) DeepCopyInto(out *GpuClusterParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.InterconnectType != nil {
+		in, out := &in.InterconnectType, &out.InterconnectType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterParameters.
+func (in *GpuClusterParameters) DeepCopy() *GpuClusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterSpec) DeepCopyInto(out *GpuClusterSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterSpec.
+func (in *GpuClusterSpec) DeepCopy() *GpuClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GpuClusterStatus) DeepCopyInto(out *GpuClusterStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterStatus.
+func (in *GpuClusterStatus) DeepCopy() *GpuClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(GpuClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPOptionsInitParameters) DeepCopyInto(out *HTTPOptionsInitParameters) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOptionsInitParameters.
+func (in *HTTPOptionsInitParameters) DeepCopy() *HTTPOptionsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPOptionsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPOptionsObservation) DeepCopyInto(out *HTTPOptionsObservation) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOptionsObservation.
+func (in *HTTPOptionsObservation) DeepCopy() *HTTPOptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPOptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPOptionsParameters) DeepCopyInto(out *HTTPOptionsParameters) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOptionsParameters.
+func (in *HTTPOptionsParameters) DeepCopy() *HTTPOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationGeneration2FeaturesInitParameters) DeepCopyInto(out *HardwareGenerationGeneration2FeaturesInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationGeneration2FeaturesInitParameters.
+func (in *HardwareGenerationGeneration2FeaturesInitParameters) DeepCopy() *HardwareGenerationGeneration2FeaturesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationGeneration2FeaturesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationGeneration2FeaturesObservation) DeepCopyInto(out *HardwareGenerationGeneration2FeaturesObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationGeneration2FeaturesObservation.
+func (in *HardwareGenerationGeneration2FeaturesObservation) DeepCopy() *HardwareGenerationGeneration2FeaturesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationGeneration2FeaturesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationGeneration2FeaturesParameters) DeepCopyInto(out *HardwareGenerationGeneration2FeaturesParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationGeneration2FeaturesParameters.
+func (in *HardwareGenerationGeneration2FeaturesParameters) DeepCopy() *HardwareGenerationGeneration2FeaturesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationGeneration2FeaturesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationInitParameters) DeepCopyInto(out *HardwareGenerationInitParameters) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]Generation2FeaturesInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]LegacyFeaturesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationInitParameters.
+func (in *HardwareGenerationInitParameters) DeepCopy() *HardwareGenerationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationLegacyFeaturesInitParameters) DeepCopyInto(out *HardwareGenerationLegacyFeaturesInitParameters) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationLegacyFeaturesInitParameters.
+func (in *HardwareGenerationLegacyFeaturesInitParameters) DeepCopy() *HardwareGenerationLegacyFeaturesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationLegacyFeaturesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationLegacyFeaturesObservation) DeepCopyInto(out *HardwareGenerationLegacyFeaturesObservation) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationLegacyFeaturesObservation.
+func (in *HardwareGenerationLegacyFeaturesObservation) DeepCopy() *HardwareGenerationLegacyFeaturesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationLegacyFeaturesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationLegacyFeaturesParameters) DeepCopyInto(out *HardwareGenerationLegacyFeaturesParameters) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationLegacyFeaturesParameters.
+func (in *HardwareGenerationLegacyFeaturesParameters) DeepCopy() *HardwareGenerationLegacyFeaturesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationLegacyFeaturesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationObservation) DeepCopyInto(out *HardwareGenerationObservation) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]Generation2FeaturesParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]LegacyFeaturesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationObservation.
+func (in *HardwareGenerationObservation) DeepCopy() *HardwareGenerationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HardwareGenerationParameters) DeepCopyInto(out *HardwareGenerationParameters) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]Generation2FeaturesParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]LegacyFeaturesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
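The HardwareGeneration* bodies above use two different slice strategies: copy(*out, *in) when the element type contains no pointers (the field-less Generation2Features structs, where value assignment is already a deep copy), and an element-wise DeepCopyInto loop when it does (LegacyFeatures, whose PciTopology is a *string). A side-by-side sketch; the flat and deep types are illustrative stand-ins:

    package main

    import "fmt"

    type flat struct{ N int }     // no pointers: copy() is already deep
    type deep struct{ P *string } // pointer field: each element needs its own pointee

    func copyFlatSlice(in []flat) []flat {
    	out := make([]flat, len(in))
    	copy(out, in) // value semantics, nothing shared
    	return out
    }

    func copyDeepSlice(in []deep) []deep {
    	out := make([]deep, len(in))
    	for i := range in {
    		if in[i].P != nil {
    			v := *in[i].P
    			out[i].P = &v
    		}
    	}
    	return out
    }

    func main() {
    	s := "PCI_TOPOLOGY_V2"
    	d := copyDeepSlice([]deep{{P: &s}})
    	*d[0].P = "PCI_TOPOLOGY_V1"
    	fmt.Println(s, *d[0].P)                         // PCI_TOPOLOGY_V2 PCI_TOPOLOGY_V1
    	fmt.Println(copyFlatSlice([]flat{{N: 1}})[0].N) // 1
    }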
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareGenerationParameters.
+func (in *HardwareGenerationParameters) DeepCopy() *HardwareGenerationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HardwareGenerationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthCheckInitParameters) DeepCopyInto(out *HealthCheckInitParameters) {
+	*out = *in
+	if in.HTTPOptions != nil {
+		in, out := &in.HTTPOptions, &out.HTTPOptions
+		*out = make([]HTTPOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HealthyThreshold != nil {
+		in, out := &in.HealthyThreshold, &out.HealthyThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Interval != nil {
+		in, out := &in.Interval, &out.Interval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TCPOptions != nil {
+		in, out := &in.TCPOptions, &out.TCPOptions
+		*out = make([]TCPOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UnhealthyThreshold != nil {
+		in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckInitParameters.
+func (in *HealthCheckInitParameters) DeepCopy() *HealthCheckInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HealthCheckInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthCheckObservation) DeepCopyInto(out *HealthCheckObservation) {
+	*out = *in
+	if in.HTTPOptions != nil {
+		in, out := &in.HTTPOptions, &out.HTTPOptions
+		*out = make([]HTTPOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HealthyThreshold != nil {
+		in, out := &in.HealthyThreshold, &out.HealthyThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Interval != nil {
+		in, out := &in.Interval, &out.Interval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TCPOptions != nil {
+		in, out := &in.TCPOptions, &out.TCPOptions
+		*out = make([]TCPOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UnhealthyThreshold != nil {
+		in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckObservation.
+func (in *HealthCheckObservation) DeepCopy() *HealthCheckObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HealthCheckObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthCheckParameters) DeepCopyInto(out *HealthCheckParameters) {
+	*out = *in
+	if in.HTTPOptions != nil {
+		in, out := &in.HTTPOptions, &out.HTTPOptions
+		*out = make([]HTTPOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HealthyThreshold != nil {
+		in, out := &in.HealthyThreshold, &out.HealthyThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Interval != nil {
+		in, out := &in.Interval, &out.Interval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TCPOptions != nil {
+		in, out := &in.TCPOptions, &out.TCPOptions
+		*out = make([]TCPOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UnhealthyThreshold != nil {
+		in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckParameters.
+func (in *HealthCheckParameters) DeepCopy() *HealthCheckParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HealthCheckParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostAffinityRulesInitParameters) DeepCopyInto(out *HostAffinityRulesInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Op != nil {
+		in, out := &in.Op, &out.Op
+		*out = new(string)
+		**out = **in
+	}
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAffinityRulesInitParameters.
+func (in *HostAffinityRulesInitParameters) DeepCopy() *HostAffinityRulesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HostAffinityRulesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostAffinityRulesObservation) DeepCopyInto(out *HostAffinityRulesObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Op != nil {
+		in, out := &in.Op, &out.Op
+		*out = new(string)
+		**out = **in
+	}
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAffinityRulesObservation.
+func (in *HostAffinityRulesObservation) DeepCopy() *HostAffinityRulesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HostAffinityRulesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostAffinityRulesParameters) DeepCopyInto(out *HostAffinityRulesParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Op != nil {
+		in, out := &in.Op, &out.Op
+		*out = new(string)
+		**out = **in
+	}
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAffinityRulesParameters.
+func (in *HostAffinityRulesParameters) DeepCopy() *HostAffinityRulesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HostAffinityRulesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6DNSRecordInitParameters) DeepCopyInto(out *IPv6DNSRecordInitParameters) {
+	*out = *in
+	if in.DNSZoneID != nil {
+		in, out := &in.DNSZoneID, &out.DNSZoneID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ptr != nil {
+		in, out := &in.Ptr, &out.Ptr
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TTL != nil {
+		in, out := &in.TTL, &out.TTL
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordInitParameters.
+func (in *IPv6DNSRecordInitParameters) DeepCopy() *IPv6DNSRecordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPv6DNSRecordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6DNSRecordObservation) DeepCopyInto(out *IPv6DNSRecordObservation) {
+	*out = *in
+	if in.DNSZoneID != nil {
+		in, out := &in.DNSZoneID, &out.DNSZoneID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ptr != nil {
+		in, out := &in.Ptr, &out.Ptr
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TTL != nil {
+		in, out := &in.TTL, &out.TTL
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordObservation.
+func (in *IPv6DNSRecordObservation) DeepCopy() *IPv6DNSRecordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(IPv6DNSRecordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6DNSRecordParameters) DeepCopyInto(out *IPv6DNSRecordParameters) {
+	*out = *in
+	if in.DNSZoneID != nil {
+		in, out := &in.DNSZoneID, &out.DNSZoneID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ptr != nil {
+		in, out := &in.Ptr, &out.Ptr
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TTL != nil {
+		in, out := &in.TTL, &out.TTL
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordParameters.
+func (in *IPv6DNSRecordParameters) DeepCopy() *IPv6DNSRecordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPv6DNSRecordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
+func (in *Image) DeepCopy() *Image {
+	if in == nil {
+		return nil
+	}
+	out := new(Image)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Image) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageHardwareGenerationInitParameters) DeepCopyInto(out *ImageHardwareGenerationInitParameters) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]HardwareGenerationGeneration2FeaturesInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]HardwareGenerationLegacyFeaturesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageHardwareGenerationInitParameters.
+func (in *ImageHardwareGenerationInitParameters) DeepCopy() *ImageHardwareGenerationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageHardwareGenerationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageHardwareGenerationObservation) DeepCopyInto(out *ImageHardwareGenerationObservation) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]HardwareGenerationGeneration2FeaturesParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]HardwareGenerationLegacyFeaturesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageHardwareGenerationObservation.
+func (in *ImageHardwareGenerationObservation) DeepCopy() *ImageHardwareGenerationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageHardwareGenerationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageHardwareGenerationParameters) DeepCopyInto(out *ImageHardwareGenerationParameters) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]HardwareGenerationGeneration2FeaturesParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]HardwareGenerationLegacyFeaturesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageHardwareGenerationParameters.
+func (in *ImageHardwareGenerationParameters) DeepCopy() *ImageHardwareGenerationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageHardwareGenerationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBinding) DeepCopyInto(out *ImageIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBinding.
+func (in *ImageIAMBinding) DeepCopy() *ImageIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBindingInitParameters) DeepCopyInto(out *ImageIAMBindingInitParameters) {
+	*out = *in
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingInitParameters.
+func (in *ImageIAMBindingInitParameters) DeepCopy() *ImageIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBindingList) DeepCopyInto(out *ImageIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ImageIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingList.
+func (in *ImageIAMBindingList) DeepCopy() *ImageIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBindingObservation) DeepCopyInto(out *ImageIAMBindingObservation) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingObservation.
+func (in *ImageIAMBindingObservation) DeepCopy() *ImageIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBindingParameters) DeepCopyInto(out *ImageIAMBindingParameters) {
+	*out = *in
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingParameters.
+func (in *ImageIAMBindingParameters) DeepCopy() *ImageIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBindingSpec) DeepCopyInto(out *ImageIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingSpec.
+func (in *ImageIAMBindingSpec) DeepCopy() *ImageIAMBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageIAMBindingStatus) DeepCopyInto(out *ImageIAMBindingStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
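ImageIAMBindingList above completes the list pattern: TypeMeta is copied by value, ListMeta delegates to its own DeepCopyInto, and Items is rebuilt element by element, so copying a list also deep-copies every item in it. A short sketch, reusing the placeholder v1alpha1 import from the earlier example:

    package main

    import (
    	"fmt"

    	v1alpha1 "example.com/provider/apis/compute/v1alpha1" // placeholder import path
    )

    func main() {
    	list := &v1alpha1.ImageIAMBindingList{
    		Items: []v1alpha1.ImageIAMBinding{{}},
    	}
    	list.Items[0].SetName("original")

    	cp := list.DeepCopy() // items are copies too, not shared pointers
    	cp.Items[0].SetName("changed")
    	fmt.Println(list.Items[0].GetName(), cp.Items[0].GetName()) // original changed
    }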
+func (in *ImageIAMBindingStatus) DeepCopy() *ImageIAMBindingStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageIAMBindingStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Family != nil {
+		in, out := &in.Family, &out.Family
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]ImageHardwareGenerationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MinDiskSize != nil {
+		in, out := &in.MinDiskSize, &out.MinDiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OsType != nil {
+		in, out := &in.OsType, &out.OsType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Pooled != nil {
+		in, out := &in.Pooled, &out.Pooled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ProductIds != nil {
+		in, out := &in.ProductIds, &out.ProductIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SourceDisk != nil {
+		in, out := &in.SourceDisk, &out.SourceDisk
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceFamily != nil {
+		in, out := &in.SourceFamily, &out.SourceFamily
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceImage != nil {
+		in, out := &in.SourceImage, &out.SourceImage
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceSnapshot != nil {
+		in, out := &in.SourceSnapshot, &out.SourceSnapshot
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceURL != nil {
+		in, out := &in.SourceURL, &out.SourceURL
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInitParameters.
+func (in *ImageInitParameters) DeepCopy() *ImageInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
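Every pointer field above follows the same generated idiom: allocate a fresh target, then copy the pointed-to value, so the clone shares no memory with the source. A minimal hand-written sketch of that idiom (the Example type is hypothetical, not part of this generated file):

type Example struct {
	Name *string
}

// DeepCopyInto mirrors the generated pattern for a single *string field.
func (in *Example) DeepCopyInto(out *Example) {
	*out = *in // shallow copy first; out.Name still aliases in.Name
	if in.Name != nil {
		in, out := &in.Name, &out.Name // shadow in/out with the field addresses, as the generated code does
		*out = new(string)             // fresh allocation for the clone
		**out = **in                   // copy the value, breaking the alias
	}
}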
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageList) DeepCopyInto(out *ImageList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Image, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
+func (in *ImageList) DeepCopy() *ImageList {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageObservation) DeepCopyInto(out *ImageObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Family != nil {
+		in, out := &in.Family, &out.Family
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]ImageHardwareGenerationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MinDiskSize != nil {
+		in, out := &in.MinDiskSize, &out.MinDiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OsType != nil {
+		in, out := &in.OsType, &out.OsType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Pooled != nil {
+		in, out := &in.Pooled, &out.Pooled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ProductIds != nil {
+		in, out := &in.ProductIds, &out.ProductIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SourceDisk != nil {
+		in, out := &in.SourceDisk, &out.SourceDisk
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceFamily != nil {
+		in, out := &in.SourceFamily, &out.SourceFamily
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceImage != nil {
+		in, out := &in.SourceImage, &out.SourceImage
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceSnapshot != nil {
+		in, out := &in.SourceSnapshot, &out.SourceSnapshot
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceURL != nil {
+		in, out := &in.SourceURL, &out.SourceURL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageObservation.
+func (in *ImageObservation) DeepCopy() *ImageObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageParameters) DeepCopyInto(out *ImageParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Family != nil {
+		in, out := &in.Family, &out.Family
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]ImageHardwareGenerationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MinDiskSize != nil {
+		in, out := &in.MinDiskSize, &out.MinDiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OsType != nil {
+		in, out := &in.OsType, &out.OsType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Pooled != nil {
+		in, out := &in.Pooled, &out.Pooled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ProductIds != nil {
+		in, out := &in.ProductIds, &out.ProductIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SourceDisk != nil {
+		in, out := &in.SourceDisk, &out.SourceDisk
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceFamily != nil {
+		in, out := &in.SourceFamily, &out.SourceFamily
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceImage != nil {
+		in, out := &in.SourceImage, &out.SourceImage
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceSnapshot != nil {
+		in, out := &in.SourceSnapshot, &out.SourceSnapshot
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceURL != nil {
+		in, out := &in.SourceURL, &out.SourceURL
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParameters.
+func (in *ImageParameters) DeepCopy() *ImageParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
+func (in *ImageSpec) DeepCopy() *ImageSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
+func (in *ImageStatus) DeepCopy() *ImageStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InitializeParamsInitParameters) DeepCopyInto(out *InitializeParamsInitParameters) {
+	*out = *in
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageIDRef != nil {
+		in, out := &in.ImageIDRef, &out.ImageIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageIDSelector != nil {
+		in, out := &in.ImageIDSelector, &out.ImageIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializeParamsInitParameters.
+func (in *InitializeParamsInitParameters) DeepCopy() *InitializeParamsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InitializeParamsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InitializeParamsObservation) DeepCopyInto(out *InitializeParamsObservation) {
+	*out = *in
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
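Each DeepCopy in this file is a thin nil-safe wrapper over the corresponding DeepCopyInto, which is why chained calls never need their own nil checks. A sketch of the shape, reusing the hypothetical Example type from the note above:

// DeepCopy allocates the clone and delegates to DeepCopyInto; a nil
// receiver yields nil rather than a panic.
func (in *Example) DeepCopy() *Example {
	if in == nil {
		return nil
	}
	out := new(Example)
	in.DeepCopyInto(out)
	return out
}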
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializeParamsObservation.
+func (in *InitializeParamsObservation) DeepCopy() *InitializeParamsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InitializeParamsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InitializeParamsParameters) DeepCopyInto(out *InitializeParamsParameters) {
+	*out = *in
+	if in.BlockSize != nil {
+		in, out := &in.BlockSize, &out.BlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImageIDRef != nil {
+		in, out := &in.ImageIDRef, &out.ImageIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageIDSelector != nil {
+		in, out := &in.ImageIDSelector, &out.ImageIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SnapshotID != nil {
+		in, out := &in.SnapshotID, &out.SnapshotID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializeParamsParameters.
+func (in *InitializeParamsParameters) DeepCopy() *InitializeParamsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InitializeParamsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Instance) DeepCopyInto(out *Instance) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance.
+func (in *Instance) DeepCopy() *Instance {
+	if in == nil {
+		return nil
+	}
+	out := new(Instance)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Instance) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceFilesystemInitParameters) DeepCopyInto(out *InstanceFilesystemInitParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFilesystemInitParameters.
+func (in *InstanceFilesystemInitParameters) DeepCopy() *InstanceFilesystemInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceFilesystemInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceFilesystemObservation) DeepCopyInto(out *InstanceFilesystemObservation) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFilesystemObservation.
+func (in *InstanceFilesystemObservation) DeepCopy() *InstanceFilesystemObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceFilesystemObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceFilesystemParameters) DeepCopyInto(out *InstanceFilesystemParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceFilesystemParameters.
+func (in *InstanceFilesystemParameters) DeepCopy() *InstanceFilesystemParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceFilesystemParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroup) DeepCopyInto(out *InstanceGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroup.
+func (in *InstanceGroup) DeepCopy() *InstanceGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstanceGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroupInitParameters) DeepCopyInto(out *InstanceGroupInitParameters) {
+	*out = *in
+	if in.AllocationPolicy != nil {
+		in, out := &in.AllocationPolicy, &out.AllocationPolicy
+		*out = make([]AllocationPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ApplicationLoadBalancer != nil {
+		in, out := &in.ApplicationLoadBalancer, &out.ApplicationLoadBalancer
+		*out = make([]ApplicationLoadBalancerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeployPolicy != nil {
+		in, out := &in.DeployPolicy, &out.DeployPolicy
+		*out = make([]DeployPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HealthCheck != nil {
+		in, out := &in.HealthCheck, &out.HealthCheck
+		*out = make([]HealthCheckInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InstanceTemplate != nil {
+		in, out := &in.InstanceTemplate, &out.InstanceTemplate
+		*out = make([]InstanceTemplateInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LoadBalancer != nil {
+		in, out := &in.LoadBalancer, &out.LoadBalancer
+		*out = make([]LoadBalancerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaxCheckingHealthDuration != nil {
+		in, out := &in.MaxCheckingHealthDuration, &out.MaxCheckingHealthDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ScalePolicy != nil {
+		in, out := &in.ScalePolicy, &out.ScalePolicy
+		*out = make([]ScalePolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Variables != nil {
+		in, out := &in.Variables, &out.Variables
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupInitParameters.
+func (in *InstanceGroupInitParameters) DeepCopy() *InstanceGroupInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroupInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroupList) DeepCopyInto(out *InstanceGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]InstanceGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupList.
+func (in *InstanceGroupList) DeepCopy() *InstanceGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstanceGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroupObservation) DeepCopyInto(out *InstanceGroupObservation) {
+	*out = *in
+	if in.AllocationPolicy != nil {
+		in, out := &in.AllocationPolicy, &out.AllocationPolicy
+		*out = make([]AllocationPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ApplicationLoadBalancer != nil {
+		in, out := &in.ApplicationLoadBalancer, &out.ApplicationLoadBalancer
+		*out = make([]ApplicationLoadBalancerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeployPolicy != nil {
+		in, out := &in.DeployPolicy, &out.DeployPolicy
+		*out = make([]DeployPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.HealthCheck != nil {
+		in, out := &in.HealthCheck, &out.HealthCheck
+		*out = make([]HealthCheckObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InstanceTemplate != nil {
+		in, out := &in.InstanceTemplate, &out.InstanceTemplate
+		*out = make([]InstanceTemplateObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Instances != nil {
+		in, out := &in.Instances, &out.Instances
+		*out = make([]InstancesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LoadBalancer != nil {
+		in, out := &in.LoadBalancer, &out.LoadBalancer
+		*out = make([]LoadBalancerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaxCheckingHealthDuration != nil {
+		in, out := &in.MaxCheckingHealthDuration, &out.MaxCheckingHealthDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ScalePolicy != nil {
+		in, out := &in.ScalePolicy, &out.ScalePolicy
+		*out = make([]ScalePolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.Variables != nil {
+		in, out := &in.Variables, &out.Variables
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupObservation.
+func (in *InstanceGroupObservation) DeepCopy() *InstanceGroupObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroupObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroupParameters) DeepCopyInto(out *InstanceGroupParameters) {
+	*out = *in
+	if in.AllocationPolicy != nil {
+		in, out := &in.AllocationPolicy, &out.AllocationPolicy
+		*out = make([]AllocationPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ApplicationLoadBalancer != nil {
+		in, out := &in.ApplicationLoadBalancer, &out.ApplicationLoadBalancer
+		*out = make([]ApplicationLoadBalancerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeployPolicy != nil {
+		in, out := &in.DeployPolicy, &out.DeployPolicy
+		*out = make([]DeployPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HealthCheck != nil {
+		in, out := &in.HealthCheck, &out.HealthCheck
+		*out = make([]HealthCheckParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InstanceTemplate != nil {
+		in, out := &in.InstanceTemplate, &out.InstanceTemplate
+		*out = make([]InstanceTemplateParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LoadBalancer != nil {
+		in, out := &in.LoadBalancer, &out.LoadBalancer
+		*out = make([]LoadBalancerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaxCheckingHealthDuration != nil {
+		in, out := &in.MaxCheckingHealthDuration, &out.MaxCheckingHealthDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.ScalePolicy != nil {
+		in, out := &in.ScalePolicy, &out.ScalePolicy
+		*out = make([]ScalePolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Variables != nil {
+		in, out := &in.Variables, &out.Variables
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupParameters.
+func (in *InstanceGroupParameters) DeepCopy() *InstanceGroupParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroupParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupSpec.
+func (in *InstanceGroupSpec) DeepCopy() *InstanceGroupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceGroupStatus) DeepCopyInto(out *InstanceGroupStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupStatus.
+func (in *InstanceGroupStatus) DeepCopy() *InstanceGroupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceGroupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
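The Labels and Variables loops above are the generated form of copying a map[string]*string so that neither the map header nor any value pointer is shared with the source. The same logic as a stand-alone helper (hypothetical, for illustration only):

// copyStringPtrMap deep-copies a map whose values are *string,
// preserving nil values and breaking pointer aliasing for the rest.
func copyStringPtrMap(src map[string]*string) map[string]*string {
	dst := make(map[string]*string, len(src))
	for key, val := range src {
		if val == nil {
			dst[key] = nil
			continue
		}
		v := *val // copy the string value itself
		dst[key] = &v
	}
	return dst
}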
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationGeneration2FeaturesInitParameters) DeepCopyInto(out *InstanceHardwareGenerationGeneration2FeaturesInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationGeneration2FeaturesInitParameters.
+func (in *InstanceHardwareGenerationGeneration2FeaturesInitParameters) DeepCopy() *InstanceHardwareGenerationGeneration2FeaturesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationGeneration2FeaturesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationGeneration2FeaturesObservation) DeepCopyInto(out *InstanceHardwareGenerationGeneration2FeaturesObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationGeneration2FeaturesObservation.
+func (in *InstanceHardwareGenerationGeneration2FeaturesObservation) DeepCopy() *InstanceHardwareGenerationGeneration2FeaturesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationGeneration2FeaturesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationGeneration2FeaturesParameters) DeepCopyInto(out *InstanceHardwareGenerationGeneration2FeaturesParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationGeneration2FeaturesParameters.
+func (in *InstanceHardwareGenerationGeneration2FeaturesParameters) DeepCopy() *InstanceHardwareGenerationGeneration2FeaturesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationGeneration2FeaturesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationInitParameters) DeepCopyInto(out *InstanceHardwareGenerationInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationInitParameters.
+func (in *InstanceHardwareGenerationInitParameters) DeepCopy() *InstanceHardwareGenerationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationLegacyFeaturesInitParameters) DeepCopyInto(out *InstanceHardwareGenerationLegacyFeaturesInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationLegacyFeaturesInitParameters.
+func (in *InstanceHardwareGenerationLegacyFeaturesInitParameters) DeepCopy() *InstanceHardwareGenerationLegacyFeaturesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationLegacyFeaturesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationLegacyFeaturesObservation) DeepCopyInto(out *InstanceHardwareGenerationLegacyFeaturesObservation) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationLegacyFeaturesObservation.
+func (in *InstanceHardwareGenerationLegacyFeaturesObservation) DeepCopy() *InstanceHardwareGenerationLegacyFeaturesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationLegacyFeaturesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationLegacyFeaturesParameters) DeepCopyInto(out *InstanceHardwareGenerationLegacyFeaturesParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationLegacyFeaturesParameters.
+func (in *InstanceHardwareGenerationLegacyFeaturesParameters) DeepCopy() *InstanceHardwareGenerationLegacyFeaturesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationLegacyFeaturesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationObservation) DeepCopyInto(out *InstanceHardwareGenerationObservation) {
+	*out = *in
+	if in.Generation2Features != nil {
+		in, out := &in.Generation2Features, &out.Generation2Features
+		*out = make([]InstanceHardwareGenerationGeneration2FeaturesObservation, len(*in))
+		copy(*out, *in)
+	}
+	if in.LegacyFeatures != nil {
+		in, out := &in.LegacyFeatures, &out.LegacyFeatures
+		*out = make([]InstanceHardwareGenerationLegacyFeaturesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationObservation.
+func (in *InstanceHardwareGenerationObservation) DeepCopy() *InstanceHardwareGenerationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceHardwareGenerationParameters) DeepCopyInto(out *InstanceHardwareGenerationParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceHardwareGenerationParameters.
+func (in *InstanceHardwareGenerationParameters) DeepCopy() *InstanceHardwareGenerationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceHardwareGenerationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBinding) DeepCopyInto(out *InstanceIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBinding.
+func (in *InstanceIAMBinding) DeepCopy() *InstanceIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstanceIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBindingInitParameters) DeepCopyInto(out *InstanceIAMBindingInitParameters) {
+	*out = *in
+	if in.InstanceID != nil {
+		in, out := &in.InstanceID, &out.InstanceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingInitParameters.
+func (in *InstanceIAMBindingInitParameters) DeepCopy() *InstanceIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBindingList) DeepCopyInto(out *InstanceIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]InstanceIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingList.
+func (in *InstanceIAMBindingList) DeepCopy() *InstanceIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstanceIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBindingObservation) DeepCopyInto(out *InstanceIAMBindingObservation) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InstanceID != nil {
+		in, out := &in.InstanceID, &out.InstanceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
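DeepCopyObject is what lets these types satisfy runtime.Object from k8s.io/apimachinery, so clients, caches, and informers can clone them without knowing the concrete type. A usage sketch (assuming the runtime package this file already imports):

// Cloning through the interface preserves the concrete type.
var obj runtime.Object = &InstanceIAMBinding{}
clone := obj.DeepCopyObject()
if binding, ok := clone.(*InstanceIAMBinding); ok {
	_ = binding // safe to mutate; shares no memory with obj
}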
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingObservation.
+func (in *InstanceIAMBindingObservation) DeepCopy() *InstanceIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBindingParameters) DeepCopyInto(out *InstanceIAMBindingParameters) {
+	*out = *in
+	if in.InstanceID != nil {
+		in, out := &in.InstanceID, &out.InstanceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingParameters.
+func (in *InstanceIAMBindingParameters) DeepCopy() *InstanceIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBindingSpec) DeepCopyInto(out *InstanceIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingSpec.
+func (in *InstanceIAMBindingSpec) DeepCopy() *InstanceIAMBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceIAMBindingStatus) DeepCopyInto(out *InstanceIAMBindingStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingStatus.
+func (in *InstanceIAMBindingStatus) DeepCopy() *InstanceIAMBindingStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceIAMBindingStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) {
+	*out = *in
+	if in.AllowRecreate != nil {
+		in, out := &in.AllowRecreate, &out.AllowRecreate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowStoppingForUpdate != nil {
+		in, out := &in.AllowStoppingForUpdate, &out.AllowStoppingForUpdate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BootDisk != nil {
+		in, out := &in.BootDisk, &out.BootDisk
+		*out = make([]BootDiskInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filesystem != nil {
+		in, out := &in.Filesystem, &out.Filesystem
+		*out = make([]InstanceFilesystemInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GpuClusterID != nil {
+		in, out := &in.GpuClusterID, &out.GpuClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hostname != nil {
+		in, out := &in.Hostname, &out.Hostname
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LocalDisk != nil {
+		in, out := &in.LocalDisk, &out.LocalDisk
+		*out = make([]LocalDiskInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaintenanceGracePeriod != nil {
+		in, out := &in.MaintenanceGracePeriod, &out.MaintenanceGracePeriod
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaintenancePolicy != nil {
+		in, out := &in.MaintenancePolicy, &out.MaintenancePolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetadataOptions != nil {
+		in, out := &in.MetadataOptions, &out.MetadataOptions
+		*out = make([]MetadataOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkAccelerationType != nil {
+		in, out := &in.NetworkAccelerationType, &out.NetworkAccelerationType
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]NetworkInterfaceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlacementPolicy != nil {
+		in, out := &in.PlacementPolicy, &out.PlacementPolicy
+		*out = make([]PlacementPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlatformID != nil {
+		in, out := &in.PlatformID, &out.PlatformID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchedulingPolicy != nil {
+		in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
+		*out = make([]SchedulingPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryDisk != nil {
+		in, out := &in.SecondaryDisk, &out.SecondaryDisk
+		*out = make([]SecondaryDiskInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceInitParameters.
+func (in *InstanceInitParameters) DeepCopy() *InstanceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceList) DeepCopyInto(out *InstanceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Instance, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceList.
+func (in *InstanceList) DeepCopy() *InstanceList {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InstanceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
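Note the two slice strategies the generator picks between: plain copy for element types that contain no pointers (the Generation2Features slices earlier), and an element-wise DeepCopyInto loop whenever elements hold pointers or nested structs (BootDisk, NetworkInterface, and the rest below). Side by side, with src and src2 as hypothetical source slices:

// Flat element type: a shallow element copy is already deep.
dst := make([]InstanceHardwareGenerationGeneration2FeaturesObservation, len(src))
copy(dst, src)

// Pointer-bearing element type: each element needs its own deep copy.
dst2 := make([]BootDiskObservation, len(src2))
for i := range src2 {
	src2[i].DeepCopyInto(&dst2[i])
}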
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) {
+	*out = *in
+	if in.AllowRecreate != nil {
+		in, out := &in.AllowRecreate, &out.AllowRecreate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowStoppingForUpdate != nil {
+		in, out := &in.AllowStoppingForUpdate, &out.AllowStoppingForUpdate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BootDisk != nil {
+		in, out := &in.BootDisk, &out.BootDisk
+		*out = make([]BootDiskObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filesystem != nil {
+		in, out := &in.Filesystem, &out.Filesystem
+		*out = make([]InstanceFilesystemObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.GpuClusterID != nil {
+		in, out := &in.GpuClusterID, &out.GpuClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.HardwareGeneration != nil {
+		in, out := &in.HardwareGeneration, &out.HardwareGeneration
+		*out = make([]InstanceHardwareGenerationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Hostname != nil {
+		in, out := &in.Hostname, &out.Hostname
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LocalDisk != nil {
+		in, out := &in.LocalDisk, &out.LocalDisk
+		*out = make([]LocalDiskObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaintenanceGracePeriod != nil {
+		in, out := &in.MaintenanceGracePeriod, &out.MaintenanceGracePeriod
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaintenancePolicy != nil {
+		in, out := &in.MaintenancePolicy, &out.MaintenancePolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetadataOptions != nil {
+		in, out := &in.MetadataOptions, &out.MetadataOptions
+		*out = make([]MetadataOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkAccelerationType != nil {
+		in, out := &in.NetworkAccelerationType, &out.NetworkAccelerationType
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]NetworkInterfaceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlacementPolicy != nil {
+		in, out := &in.PlacementPolicy, &out.PlacementPolicy
+		*out = make([]PlacementPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlatformID != nil {
+		in, out := &in.PlatformID, &out.PlatformID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchedulingPolicy != nil {
+		in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
+		*out = make([]SchedulingPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryDisk != nil {
+		in, out := &in.SecondaryDisk, &out.SecondaryDisk
+		*out = make([]SecondaryDiskObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceObservation.
+func (in *InstanceObservation) DeepCopy() *InstanceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) {
+	*out = *in
+	if in.AllowRecreate != nil {
+		in, out := &in.AllowRecreate, &out.AllowRecreate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowStoppingForUpdate != nil {
+		in, out := &in.AllowStoppingForUpdate, &out.AllowStoppingForUpdate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BootDisk != nil {
+		in, out := &in.BootDisk, &out.BootDisk
+		*out = make([]BootDiskParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filesystem != nil {
+		in, out := &in.Filesystem, &out.Filesystem
+		*out = make([]InstanceFilesystemParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GpuClusterID != nil {
+		in, out := &in.GpuClusterID, &out.GpuClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hostname != nil {
+		in, out := &in.Hostname, &out.Hostname
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LocalDisk != nil {
+		in, out := &in.LocalDisk, &out.LocalDisk
+		*out = make([]LocalDiskParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaintenanceGracePeriod != nil {
+		in, out := &in.MaintenanceGracePeriod, &out.MaintenanceGracePeriod
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaintenancePolicy != nil {
+		in, out := &in.MaintenancePolicy, &out.MaintenancePolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetadataOptions != nil {
+		in, out := &in.MetadataOptions, &out.MetadataOptions
+		*out = make([]MetadataOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkAccelerationType != nil {
+		in, out := &in.NetworkAccelerationType, &out.NetworkAccelerationType
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]NetworkInterfaceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlacementPolicy != nil {
+		in, out := &in.PlacementPolicy, &out.PlacementPolicy
+		*out = make([]PlacementPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlatformID != nil {
+		in, out := &in.PlatformID, &out.PlatformID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchedulingPolicy != nil {
+		in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
+		*out = make([]SchedulingPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryDisk != nil {
+		in, out := &in.SecondaryDisk, &out.SecondaryDisk
+		*out = make([]SecondaryDiskParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceParameters.
+func (in *InstanceParameters) DeepCopy() *InstanceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSpec.
+func (in *InstanceSpec) DeepCopy() *InstanceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus.
+func (in *InstanceStatus) DeepCopy() *InstanceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTagsPoolInitParameters) DeepCopyInto(out *InstanceTagsPoolInitParameters) {
+	*out = *in
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTagsPoolInitParameters.
+func (in *InstanceTagsPoolInitParameters) DeepCopy() *InstanceTagsPoolInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTagsPoolInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTagsPoolObservation) DeepCopyInto(out *InstanceTagsPoolObservation) {
+	*out = *in
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTagsPoolObservation.
+func (in *InstanceTagsPoolObservation) DeepCopy() *InstanceTagsPoolObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTagsPoolObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTagsPoolParameters) DeepCopyInto(out *InstanceTagsPoolParameters) {
+	*out = *in
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTagsPoolParameters.
+func (in *InstanceTagsPoolParameters) DeepCopy() *InstanceTagsPoolParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTagsPoolParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateBootDiskInitParameters) DeepCopyInto(out *InstanceTemplateBootDiskInitParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]BootDiskInitializeParamsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateBootDiskInitParameters.
+func (in *InstanceTemplateBootDiskInitParameters) DeepCopy() *InstanceTemplateBootDiskInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateBootDiskInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateBootDiskObservation) DeepCopyInto(out *InstanceTemplateBootDiskObservation) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]BootDiskInitializeParamsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateBootDiskObservation.
+func (in *InstanceTemplateBootDiskObservation) DeepCopy() *InstanceTemplateBootDiskObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateBootDiskObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateBootDiskParameters) DeepCopyInto(out *InstanceTemplateBootDiskParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]BootDiskInitializeParamsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateBootDiskParameters.
+func (in *InstanceTemplateBootDiskParameters) DeepCopy() *InstanceTemplateBootDiskParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateBootDiskParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateFilesystemInitParameters) DeepCopyInto(out *InstanceTemplateFilesystemInitParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateFilesystemInitParameters.
+func (in *InstanceTemplateFilesystemInitParameters) DeepCopy() *InstanceTemplateFilesystemInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateFilesystemInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateFilesystemObservation) DeepCopyInto(out *InstanceTemplateFilesystemObservation) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateFilesystemObservation.
+func (in *InstanceTemplateFilesystemObservation) DeepCopy() *InstanceTemplateFilesystemObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateFilesystemObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateFilesystemParameters) DeepCopyInto(out *InstanceTemplateFilesystemParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.FilesystemID != nil {
+		in, out := &in.FilesystemID, &out.FilesystemID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateFilesystemParameters.
+func (in *InstanceTemplateFilesystemParameters) DeepCopy() *InstanceTemplateFilesystemParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateFilesystemParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateInitParameters) DeepCopyInto(out *InstanceTemplateInitParameters) {
+	*out = *in
+	if in.BootDisk != nil {
+		in, out := &in.BootDisk, &out.BootDisk
+		*out = make([]InstanceTemplateBootDiskInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filesystem != nil {
+		in, out := &in.Filesystem, &out.Filesystem
+		*out = make([]InstanceTemplateFilesystemInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Hostname != nil {
+		in, out := &in.Hostname, &out.Hostname
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetadataOptions != nil {
+		in, out := &in.MetadataOptions, &out.MetadataOptions
+		*out = make([]InstanceTemplateMetadataOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]InstanceTemplateNetworkInterfaceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NetworkSettings != nil {
+		in, out := &in.NetworkSettings, &out.NetworkSettings
+		*out = make([]NetworkSettingsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlacementPolicy != nil {
+		in, out := &in.PlacementPolicy, &out.PlacementPolicy
+		*out = make([]InstanceTemplatePlacementPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlatformID != nil {
+		in, out := &in.PlatformID, &out.PlatformID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]InstanceTemplateResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchedulingPolicy != nil {
+		in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
+		*out = make([]InstanceTemplateSchedulingPolicyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryDisk != nil {
+		in, out := &in.SecondaryDisk, &out.SecondaryDisk
+		*out = make([]InstanceTemplateSecondaryDiskInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateInitParameters.
+func (in *InstanceTemplateInitParameters) DeepCopy() *InstanceTemplateInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateMetadataOptionsInitParameters) DeepCopyInto(out *InstanceTemplateMetadataOptionsInitParameters) {
+	*out = *in
+	if in.AwsV1HTTPEndpoint != nil {
+		in, out := &in.AwsV1HTTPEndpoint, &out.AwsV1HTTPEndpoint
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AwsV1HTTPToken != nil {
+		in, out := &in.AwsV1HTTPToken, &out.AwsV1HTTPToken
+		*out = new(float64)
+		**out = **in
+	}
+	if in.GceHTTPEndpoint != nil {
+		in, out := &in.GceHTTPEndpoint, &out.GceHTTPEndpoint
+		*out = new(float64)
+		**out = **in
+	}
+	if in.GceHTTPToken != nil {
+		in, out := &in.GceHTTPToken, &out.GceHTTPToken
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateMetadataOptionsInitParameters.
+func (in *InstanceTemplateMetadataOptionsInitParameters) DeepCopy() *InstanceTemplateMetadataOptionsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateMetadataOptionsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateMetadataOptionsObservation) DeepCopyInto(out *InstanceTemplateMetadataOptionsObservation) {
+	*out = *in
+	if in.AwsV1HTTPEndpoint != nil {
+		in, out := &in.AwsV1HTTPEndpoint, &out.AwsV1HTTPEndpoint
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AwsV1HTTPToken != nil {
+		in, out := &in.AwsV1HTTPToken, &out.AwsV1HTTPToken
+		*out = new(float64)
+		**out = **in
+	}
+	if in.GceHTTPEndpoint != nil {
+		in, out := &in.GceHTTPEndpoint, &out.GceHTTPEndpoint
+		*out = new(float64)
+		**out = **in
+	}
+	if in.GceHTTPToken != nil {
+		in, out := &in.GceHTTPToken, &out.GceHTTPToken
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateMetadataOptionsObservation.
+func (in *InstanceTemplateMetadataOptionsObservation) DeepCopy() *InstanceTemplateMetadataOptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateMetadataOptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateMetadataOptionsParameters) DeepCopyInto(out *InstanceTemplateMetadataOptionsParameters) {
+	*out = *in
+	if in.AwsV1HTTPEndpoint != nil {
+		in, out := &in.AwsV1HTTPEndpoint, &out.AwsV1HTTPEndpoint
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AwsV1HTTPToken != nil {
+		in, out := &in.AwsV1HTTPToken, &out.AwsV1HTTPToken
+		*out = new(float64)
+		**out = **in
+	}
+	if in.GceHTTPEndpoint != nil {
+		in, out := &in.GceHTTPEndpoint, &out.GceHTTPEndpoint
+		*out = new(float64)
+		**out = **in
+	}
+	if in.GceHTTPToken != nil {
+		in, out := &in.GceHTTPToken, &out.GceHTTPToken
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateMetadataOptionsParameters.
+func (in *InstanceTemplateMetadataOptionsParameters) DeepCopy() *InstanceTemplateMetadataOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateMetadataOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateNetworkInterfaceInitParameters) DeepCopyInto(out *InstanceTemplateNetworkInterfaceInitParameters) {
+	*out = *in
+	if in.DNSRecord != nil {
+		in, out := &in.DNSRecord, &out.DNSRecord
+		*out = make([]NetworkInterfaceDNSRecordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv4 != nil {
+		in, out := &in.IPv4, &out.IPv4
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6 != nil {
+		in, out := &in.IPv6, &out.IPv6
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6Address != nil {
+		in, out := &in.IPv6Address, &out.IPv6Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv6DNSRecord != nil {
+		in, out := &in.IPv6DNSRecord, &out.IPv6DNSRecord
+		*out = make([]NetworkInterfaceIPv6DNSRecordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NAT != nil {
+		in, out := &in.NAT, &out.NAT
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NATDNSRecord != nil {
+		in, out := &in.NATDNSRecord, &out.NATDNSRecord
+		*out = make([]NetworkInterfaceNATDNSRecordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NATIPAddress != nil {
+		in, out := &in.NATIPAddress, &out.NATIPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIds != nil {
+		in, out := &in.SubnetIds, &out.SubnetIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetIdsRefs != nil {
+		in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SubnetIdsSelector != nil {
+		in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateNetworkInterfaceInitParameters.
+func (in *InstanceTemplateNetworkInterfaceInitParameters) DeepCopy() *InstanceTemplateNetworkInterfaceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateNetworkInterfaceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateNetworkInterfaceObservation) DeepCopyInto(out *InstanceTemplateNetworkInterfaceObservation) {
+	*out = *in
+	if in.DNSRecord != nil {
+		in, out := &in.DNSRecord, &out.DNSRecord
+		*out = make([]NetworkInterfaceDNSRecordObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv4 != nil {
+		in, out := &in.IPv4, &out.IPv4
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6 != nil {
+		in, out := &in.IPv6, &out.IPv6
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6Address != nil {
+		in, out := &in.IPv6Address, &out.IPv6Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv6DNSRecord != nil {
+		in, out := &in.IPv6DNSRecord, &out.IPv6DNSRecord
+		*out = make([]NetworkInterfaceIPv6DNSRecordObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NAT != nil {
+		in, out := &in.NAT, &out.NAT
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NATDNSRecord != nil {
+		in, out := &in.NATDNSRecord, &out.NATDNSRecord
+		*out = make([]NetworkInterfaceNATDNSRecordObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NATIPAddress != nil {
+		in, out := &in.NATIPAddress, &out.NATIPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetIds != nil {
+		in, out := &in.SubnetIds, &out.SubnetIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateNetworkInterfaceObservation.
+func (in *InstanceTemplateNetworkInterfaceObservation) DeepCopy() *InstanceTemplateNetworkInterfaceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateNetworkInterfaceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateNetworkInterfaceParameters) DeepCopyInto(out *InstanceTemplateNetworkInterfaceParameters) {
+	*out = *in
+	if in.DNSRecord != nil {
+		in, out := &in.DNSRecord, &out.DNSRecord
+		*out = make([]NetworkInterfaceDNSRecordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv4 != nil {
+		in, out := &in.IPv4, &out.IPv4
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6 != nil {
+		in, out := &in.IPv6, &out.IPv6
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6Address != nil {
+		in, out := &in.IPv6Address, &out.IPv6Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv6DNSRecord != nil {
+		in, out := &in.IPv6DNSRecord, &out.IPv6DNSRecord
+		*out = make([]NetworkInterfaceIPv6DNSRecordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NAT != nil {
+		in, out := &in.NAT, &out.NAT
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NATDNSRecord != nil {
+		in, out := &in.NATDNSRecord, &out.NATDNSRecord
+		*out = make([]NetworkInterfaceNATDNSRecordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NATIPAddress != nil {
+		in, out := &in.NATIPAddress, &out.NATIPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIds != nil {
+		in, out := &in.SubnetIds, &out.SubnetIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetIdsRefs != nil {
+		in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SubnetIdsSelector != nil {
+		in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateNetworkInterfaceParameters.
+func (in *InstanceTemplateNetworkInterfaceParameters) DeepCopy() *InstanceTemplateNetworkInterfaceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateNetworkInterfaceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateObservation) DeepCopyInto(out *InstanceTemplateObservation) {
+	*out = *in
+	if in.BootDisk != nil {
+		in, out := &in.BootDisk, &out.BootDisk
+		*out = make([]InstanceTemplateBootDiskObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filesystem != nil {
+		in, out := &in.Filesystem, &out.Filesystem
+		*out = make([]InstanceTemplateFilesystemObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Hostname != nil {
+		in, out := &in.Hostname, &out.Hostname
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetadataOptions != nil {
+		in, out := &in.MetadataOptions, &out.MetadataOptions
+		*out = make([]InstanceTemplateMetadataOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]InstanceTemplateNetworkInterfaceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NetworkSettings != nil {
+		in, out := &in.NetworkSettings, &out.NetworkSettings
+		*out = make([]NetworkSettingsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlacementPolicy != nil {
+		in, out := &in.PlacementPolicy, &out.PlacementPolicy
+		*out = make([]InstanceTemplatePlacementPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlatformID != nil {
+		in, out := &in.PlatformID, &out.PlatformID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]InstanceTemplateResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchedulingPolicy != nil {
+		in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
+		*out = make([]InstanceTemplateSchedulingPolicyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryDisk != nil {
+		in, out := &in.SecondaryDisk, &out.SecondaryDisk
+		*out = make([]InstanceTemplateSecondaryDiskObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateObservation.
+func (in *InstanceTemplateObservation) DeepCopy() *InstanceTemplateObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateParameters) DeepCopyInto(out *InstanceTemplateParameters) {
+	*out = *in
+	if in.BootDisk != nil {
+		in, out := &in.BootDisk, &out.BootDisk
+		*out = make([]InstanceTemplateBootDiskParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filesystem != nil {
+		in, out := &in.Filesystem, &out.Filesystem
+		*out = make([]InstanceTemplateFilesystemParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Hostname != nil {
+		in, out := &in.Hostname, &out.Hostname
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MetadataOptions != nil {
+		in, out := &in.MetadataOptions, &out.MetadataOptions
+		*out = make([]InstanceTemplateMetadataOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]InstanceTemplateNetworkInterfaceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NetworkSettings != nil {
+		in, out := &in.NetworkSettings, &out.NetworkSettings
+		*out = make([]NetworkSettingsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlacementPolicy != nil {
+		in, out := &in.PlacementPolicy, &out.PlacementPolicy
+		*out = make([]InstanceTemplatePlacementPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PlatformID != nil {
+		in, out := &in.PlatformID, &out.PlatformID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]InstanceTemplateResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchedulingPolicy != nil {
+		in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
+		*out = make([]InstanceTemplateSchedulingPolicyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryDisk != nil {
+		in, out := &in.SecondaryDisk, &out.SecondaryDisk
+		*out = make([]InstanceTemplateSecondaryDiskParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateParameters.
+func (in *InstanceTemplateParameters) DeepCopy() *InstanceTemplateParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplatePlacementPolicyInitParameters) DeepCopyInto(out *InstanceTemplatePlacementPolicyInitParameters) {
+	*out = *in
+	if in.PlacementGroupID != nil {
+		in, out := &in.PlacementGroupID, &out.PlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplatePlacementPolicyInitParameters.
+func (in *InstanceTemplatePlacementPolicyInitParameters) DeepCopy() *InstanceTemplatePlacementPolicyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplatePlacementPolicyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplatePlacementPolicyObservation) DeepCopyInto(out *InstanceTemplatePlacementPolicyObservation) {
+	*out = *in
+	if in.PlacementGroupID != nil {
+		in, out := &in.PlacementGroupID, &out.PlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplatePlacementPolicyObservation.
+func (in *InstanceTemplatePlacementPolicyObservation) DeepCopy() *InstanceTemplatePlacementPolicyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplatePlacementPolicyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplatePlacementPolicyParameters) DeepCopyInto(out *InstanceTemplatePlacementPolicyParameters) {
+	*out = *in
+	if in.PlacementGroupID != nil {
+		in, out := &in.PlacementGroupID, &out.PlacementGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplatePlacementPolicyParameters.
+func (in *InstanceTemplatePlacementPolicyParameters) DeepCopy() *InstanceTemplatePlacementPolicyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplatePlacementPolicyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateResourcesInitParameters) DeepCopyInto(out *InstanceTemplateResourcesInitParameters) {
+	*out = *in
+	if in.CoreFraction != nil {
+		in, out := &in.CoreFraction, &out.CoreFraction
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Cores != nil {
+		in, out := &in.Cores, &out.Cores
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Gpus != nil {
+		in, out := &in.Gpus, &out.Gpus
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Memory != nil {
+		in, out := &in.Memory, &out.Memory
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateResourcesInitParameters.
+func (in *InstanceTemplateResourcesInitParameters) DeepCopy() *InstanceTemplateResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateResourcesObservation) DeepCopyInto(out *InstanceTemplateResourcesObservation) {
+	*out = *in
+	if in.CoreFraction != nil {
+		in, out := &in.CoreFraction, &out.CoreFraction
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Cores != nil {
+		in, out := &in.Cores, &out.Cores
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Gpus != nil {
+		in, out := &in.Gpus, &out.Gpus
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Memory != nil {
+		in, out := &in.Memory, &out.Memory
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateResourcesObservation.
+func (in *InstanceTemplateResourcesObservation) DeepCopy() *InstanceTemplateResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateResourcesParameters) DeepCopyInto(out *InstanceTemplateResourcesParameters) {
+	*out = *in
+	if in.CoreFraction != nil {
+		in, out := &in.CoreFraction, &out.CoreFraction
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Cores != nil {
+		in, out := &in.Cores, &out.Cores
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Gpus != nil {
+		in, out := &in.Gpus, &out.Gpus
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Memory != nil {
+		in, out := &in.Memory, &out.Memory
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateResourcesParameters.
+func (in *InstanceTemplateResourcesParameters) DeepCopy() *InstanceTemplateResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateSchedulingPolicyInitParameters) DeepCopyInto(out *InstanceTemplateSchedulingPolicyInitParameters) {
+	*out = *in
+	if in.Preemptible != nil {
+		in, out := &in.Preemptible, &out.Preemptible
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateSchedulingPolicyInitParameters.
+func (in *InstanceTemplateSchedulingPolicyInitParameters) DeepCopy() *InstanceTemplateSchedulingPolicyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateSchedulingPolicyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateSchedulingPolicyObservation) DeepCopyInto(out *InstanceTemplateSchedulingPolicyObservation) {
+	*out = *in
+	if in.Preemptible != nil {
+		in, out := &in.Preemptible, &out.Preemptible
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateSchedulingPolicyObservation.
+func (in *InstanceTemplateSchedulingPolicyObservation) DeepCopy() *InstanceTemplateSchedulingPolicyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateSchedulingPolicyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateSchedulingPolicyParameters) DeepCopyInto(out *InstanceTemplateSchedulingPolicyParameters) {
+	*out = *in
+	if in.Preemptible != nil {
+		in, out := &in.Preemptible, &out.Preemptible
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateSchedulingPolicyParameters.
+func (in *InstanceTemplateSchedulingPolicyParameters) DeepCopy() *InstanceTemplateSchedulingPolicyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateSchedulingPolicyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateSecondaryDiskInitParameters) DeepCopyInto(out *InstanceTemplateSecondaryDiskInitParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]SecondaryDiskInitializeParamsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateSecondaryDiskInitParameters.
+func (in *InstanceTemplateSecondaryDiskInitParameters) DeepCopy() *InstanceTemplateSecondaryDiskInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateSecondaryDiskInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateSecondaryDiskObservation) DeepCopyInto(out *InstanceTemplateSecondaryDiskObservation) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]SecondaryDiskInitializeParamsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateSecondaryDiskObservation.
+func (in *InstanceTemplateSecondaryDiskObservation) DeepCopy() *InstanceTemplateSecondaryDiskObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateSecondaryDiskObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceTemplateSecondaryDiskParameters) DeepCopyInto(out *InstanceTemplateSecondaryDiskParameters) {
+	*out = *in
+	if in.DeviceName != nil {
+		in, out := &in.DeviceName, &out.DeviceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskID != nil {
+		in, out := &in.DiskID, &out.DiskID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InitializeParams != nil {
+		in, out := &in.InitializeParams, &out.InitializeParams
+		*out = make([]SecondaryDiskInitializeParamsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateSecondaryDiskParameters.
+func (in *InstanceTemplateSecondaryDiskParameters) DeepCopy() *InstanceTemplateSecondaryDiskParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceTemplateSecondaryDiskParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstancesInitParameters) DeepCopyInto(out *InstancesInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesInitParameters.
+func (in *InstancesInitParameters) DeepCopy() *InstancesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstancesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstancesNetworkInterfaceInitParameters) DeepCopyInto(out *InstancesNetworkInterfaceInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesNetworkInterfaceInitParameters.
+func (in *InstancesNetworkInterfaceInitParameters) DeepCopy() *InstancesNetworkInterfaceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstancesNetworkInterfaceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstancesNetworkInterfaceObservation) DeepCopyInto(out *InstancesNetworkInterfaceObservation) {
+	*out = *in
+	if in.IPAddress != nil {
+		in, out := &in.IPAddress, &out.IPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.IPv4 != nil {
+		in, out := &in.IPv4, &out.IPv4
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6 != nil {
+		in, out := &in.IPv6, &out.IPv6
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IPv6Address != nil {
+		in, out := &in.IPv6Address, &out.IPv6Address
+		*out = new(string)
+		**out = **in
+	}
+	if in.Index != nil {
+		in, out := &in.Index, &out.Index
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MacAddress != nil {
+		in, out := &in.MacAddress, &out.MacAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.NAT != nil {
+		in, out := &in.NAT, &out.NAT
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NATIPAddress != nil {
+		in, out := &in.NATIPAddress, &out.NATIPAddress
+		*out = new(string)
+		**out = **in
+	}
+	if in.NATIPVersion != nil {
+		in, out := &in.NATIPVersion, &out.NATIPVersion
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesNetworkInterfaceObservation.
+func (in *InstancesNetworkInterfaceObservation) DeepCopy() *InstancesNetworkInterfaceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstancesNetworkInterfaceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstancesNetworkInterfaceParameters) DeepCopyInto(out *InstancesNetworkInterfaceParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesNetworkInterfaceParameters.
+func (in *InstancesNetworkInterfaceParameters) DeepCopy() *InstancesNetworkInterfaceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstancesNetworkInterfaceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstancesObservation) DeepCopyInto(out *InstancesObservation) {
+	*out = *in
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.InstanceID != nil {
+		in, out := &in.InstanceID, &out.InstanceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InstanceTag != nil {
+		in, out := &in.InstanceTag, &out.InstanceTag
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkInterface != nil {
+		in, out := &in.NetworkInterface, &out.NetworkInterface
+		*out = make([]InstancesNetworkInterfaceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.StatusChangedAt != nil {
+		in, out := &in.StatusChangedAt, &out.StatusChangedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.StatusMessage != nil {
+		in, out := &in.StatusMessage, &out.StatusMessage
+		*out = new(string)
+		**out = **in
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesObservation.
+func (in *InstancesObservation) DeepCopy() *InstancesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(InstancesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstancesParameters) DeepCopyInto(out *InstancesParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesParameters.
+func (in *InstancesParameters) DeepCopy() *InstancesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(InstancesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LegacyFeaturesInitParameters) DeepCopyInto(out *LegacyFeaturesInitParameters) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyFeaturesInitParameters.
+func (in *LegacyFeaturesInitParameters) DeepCopy() *LegacyFeaturesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LegacyFeaturesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LegacyFeaturesObservation) DeepCopyInto(out *LegacyFeaturesObservation) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyFeaturesObservation.
+func (in *LegacyFeaturesObservation) DeepCopy() *LegacyFeaturesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LegacyFeaturesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LegacyFeaturesParameters) DeepCopyInto(out *LegacyFeaturesParameters) {
+	*out = *in
+	if in.PciTopology != nil {
+		in, out := &in.PciTopology, &out.PciTopology
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyFeaturesParameters.
+func (in *LegacyFeaturesParameters) DeepCopy() *LegacyFeaturesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LegacyFeaturesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerInitParameters) DeepCopyInto(out *LoadBalancerInitParameters) {
+	*out = *in
+	if in.IgnoreHealthChecks != nil {
+		in, out := &in.IgnoreHealthChecks, &out.IgnoreHealthChecks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxOpeningTrafficDuration != nil {
+		in, out := &in.MaxOpeningTrafficDuration, &out.MaxOpeningTrafficDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TargetGroupDescription != nil {
+		in, out := &in.TargetGroupDescription, &out.TargetGroupDescription
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetGroupLabels != nil {
+		in, out := &in.TargetGroupLabels, &out.TargetGroupLabels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.TargetGroupName != nil {
+		in, out := &in.TargetGroupName, &out.TargetGroupName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerInitParameters.
+func (in *LoadBalancerInitParameters) DeepCopy() *LoadBalancerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerObservation) DeepCopyInto(out *LoadBalancerObservation) {
+	*out = *in
+	if in.IgnoreHealthChecks != nil {
+		in, out := &in.IgnoreHealthChecks, &out.IgnoreHealthChecks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxOpeningTrafficDuration != nil {
+		in, out := &in.MaxOpeningTrafficDuration, &out.MaxOpeningTrafficDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatusMessage != nil {
+		in, out := &in.StatusMessage, &out.StatusMessage
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetGroupDescription != nil {
+		in, out := &in.TargetGroupDescription, &out.TargetGroupDescription
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetGroupID != nil {
+		in, out := &in.TargetGroupID, &out.TargetGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetGroupLabels != nil {
+		in, out := &in.TargetGroupLabels, &out.TargetGroupLabels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.TargetGroupName != nil {
+		in, out := &in.TargetGroupName, &out.TargetGroupName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerObservation.
+func (in *LoadBalancerObservation) DeepCopy() *LoadBalancerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerParameters) DeepCopyInto(out *LoadBalancerParameters) { + *out = *in + if in.IgnoreHealthChecks != nil { + in, out := &in.IgnoreHealthChecks, &out.IgnoreHealthChecks + *out = new(bool) + **out = **in + } + if in.MaxOpeningTrafficDuration != nil { + in, out := &in.MaxOpeningTrafficDuration, &out.MaxOpeningTrafficDuration + *out = new(float64) + **out = **in + } + if in.TargetGroupDescription != nil { + in, out := &in.TargetGroupDescription, &out.TargetGroupDescription + *out = new(string) + **out = **in + } + if in.TargetGroupLabels != nil { + in, out := &in.TargetGroupLabels, &out.TargetGroupLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetGroupName != nil { + in, out := &in.TargetGroupName, &out.TargetGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerParameters. +func (in *LoadBalancerParameters) DeepCopy() *LoadBalancerParameters { + if in == nil { + return nil + } + out := new(LoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalDiskInitParameters) DeepCopyInto(out *LocalDiskInitParameters) { + *out = *in + if in.SizeBytes != nil { + in, out := &in.SizeBytes, &out.SizeBytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalDiskInitParameters. +func (in *LocalDiskInitParameters) DeepCopy() *LocalDiskInitParameters { + if in == nil { + return nil + } + out := new(LocalDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalDiskObservation) DeepCopyInto(out *LocalDiskObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.SizeBytes != nil { + in, out := &in.SizeBytes, &out.SizeBytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalDiskObservation. +func (in *LocalDiskObservation) DeepCopy() *LocalDiskObservation { + if in == nil { + return nil + } + out := new(LocalDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalDiskParameters) DeepCopyInto(out *LocalDiskParameters) { + *out = *in + if in.SizeBytes != nil { + in, out := &in.SizeBytes, &out.SizeBytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalDiskParameters. +func (in *LocalDiskParameters) DeepCopy() *LocalDiskParameters { + if in == nil { + return nil + } + out := new(LocalDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataOptionsInitParameters) DeepCopyInto(out *MetadataOptionsInitParameters) { + *out = *in + if in.AwsV1HTTPEndpoint != nil { + in, out := &in.AwsV1HTTPEndpoint, &out.AwsV1HTTPEndpoint + *out = new(float64) + **out = **in + } + if in.AwsV1HTTPToken != nil { + in, out := &in.AwsV1HTTPToken, &out.AwsV1HTTPToken + *out = new(float64) + **out = **in + } + if in.GceHTTPEndpoint != nil { + in, out := &in.GceHTTPEndpoint, &out.GceHTTPEndpoint + *out = new(float64) + **out = **in + } + if in.GceHTTPToken != nil { + in, out := &in.GceHTTPToken, &out.GceHTTPToken + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsInitParameters. +func (in *MetadataOptionsInitParameters) DeepCopy() *MetadataOptionsInitParameters { + if in == nil { + return nil + } + out := new(MetadataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataOptionsObservation) DeepCopyInto(out *MetadataOptionsObservation) { + *out = *in + if in.AwsV1HTTPEndpoint != nil { + in, out := &in.AwsV1HTTPEndpoint, &out.AwsV1HTTPEndpoint + *out = new(float64) + **out = **in + } + if in.AwsV1HTTPToken != nil { + in, out := &in.AwsV1HTTPToken, &out.AwsV1HTTPToken + *out = new(float64) + **out = **in + } + if in.GceHTTPEndpoint != nil { + in, out := &in.GceHTTPEndpoint, &out.GceHTTPEndpoint + *out = new(float64) + **out = **in + } + if in.GceHTTPToken != nil { + in, out := &in.GceHTTPToken, &out.GceHTTPToken + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsObservation. +func (in *MetadataOptionsObservation) DeepCopy() *MetadataOptionsObservation { + if in == nil { + return nil + } + out := new(MetadataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataOptionsParameters) DeepCopyInto(out *MetadataOptionsParameters) { + *out = *in + if in.AwsV1HTTPEndpoint != nil { + in, out := &in.AwsV1HTTPEndpoint, &out.AwsV1HTTPEndpoint + *out = new(float64) + **out = **in + } + if in.AwsV1HTTPToken != nil { + in, out := &in.AwsV1HTTPToken, &out.AwsV1HTTPToken + *out = new(float64) + **out = **in + } + if in.GceHTTPEndpoint != nil { + in, out := &in.GceHTTPEndpoint, &out.GceHTTPEndpoint + *out = new(float64) + **out = **in + } + if in.GceHTTPToken != nil { + in, out := &in.GceHTTPToken, &out.GceHTTPToken + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsParameters. +func (in *MetadataOptionsParameters) DeepCopy() *MetadataOptionsParameters { + if in == nil { + return nil + } + out := new(MetadataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NATDNSRecordInitParameters) DeepCopyInto(out *NATDNSRecordInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATDNSRecordInitParameters. +func (in *NATDNSRecordInitParameters) DeepCopy() *NATDNSRecordInitParameters { + if in == nil { + return nil + } + out := new(NATDNSRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NATDNSRecordObservation) DeepCopyInto(out *NATDNSRecordObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATDNSRecordObservation. +func (in *NATDNSRecordObservation) DeepCopy() *NATDNSRecordObservation { + if in == nil { + return nil + } + out := new(NATDNSRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NATDNSRecordParameters) DeepCopyInto(out *NATDNSRecordParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATDNSRecordParameters. +func (in *NATDNSRecordParameters) DeepCopy() *NATDNSRecordParameters { + if in == nil { + return nil + } + out := new(NATDNSRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceDNSRecordInitParameters) DeepCopyInto(out *NetworkInterfaceDNSRecordInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceDNSRecordInitParameters. 
+func (in *NetworkInterfaceDNSRecordInitParameters) DeepCopy() *NetworkInterfaceDNSRecordInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceDNSRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceDNSRecordObservation) DeepCopyInto(out *NetworkInterfaceDNSRecordObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceDNSRecordObservation. +func (in *NetworkInterfaceDNSRecordObservation) DeepCopy() *NetworkInterfaceDNSRecordObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceDNSRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceDNSRecordParameters) DeepCopyInto(out *NetworkInterfaceDNSRecordParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceDNSRecordParameters. +func (in *NetworkInterfaceDNSRecordParameters) DeepCopy() *NetworkInterfaceDNSRecordParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceDNSRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceIPv6DNSRecordInitParameters) DeepCopyInto(out *NetworkInterfaceIPv6DNSRecordInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPv6DNSRecordInitParameters. +func (in *NetworkInterfaceIPv6DNSRecordInitParameters) DeepCopy() *NetworkInterfaceIPv6DNSRecordInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPv6DNSRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceIPv6DNSRecordObservation) DeepCopyInto(out *NetworkInterfaceIPv6DNSRecordObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPv6DNSRecordObservation. +func (in *NetworkInterfaceIPv6DNSRecordObservation) DeepCopy() *NetworkInterfaceIPv6DNSRecordObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPv6DNSRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceIPv6DNSRecordParameters) DeepCopyInto(out *NetworkInterfaceIPv6DNSRecordParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPv6DNSRecordParameters. +func (in *NetworkInterfaceIPv6DNSRecordParameters) DeepCopy() *NetworkInterfaceIPv6DNSRecordParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPv6DNSRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceInitParameters) DeepCopyInto(out *NetworkInterfaceInitParameters) { + *out = *in + if in.DNSRecord != nil { + in, out := &in.DNSRecord, &out.DNSRecord + *out = make([]DNSRecordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.IPv6DNSRecord != nil { + in, out := &in.IPv6DNSRecord, &out.IPv6DNSRecord + *out = make([]IPv6DNSRecordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(float64) + **out = **in + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.NATDNSRecord != nil { + in, out := &in.NATDNSRecord, &out.NATDNSRecord + *out = make([]NATDNSRecordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NATIPAddress != nil { + in, out := &in.NATIPAddress, &out.NATIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceInitParameters. +func (in *NetworkInterfaceInitParameters) DeepCopy() *NetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceNATDNSRecordInitParameters) DeepCopyInto(out *NetworkInterfaceNATDNSRecordInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceNATDNSRecordInitParameters. 
+func (in *NetworkInterfaceNATDNSRecordInitParameters) DeepCopy() *NetworkInterfaceNATDNSRecordInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceNATDNSRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceNATDNSRecordObservation) DeepCopyInto(out *NetworkInterfaceNATDNSRecordObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceNATDNSRecordObservation. +func (in *NetworkInterfaceNATDNSRecordObservation) DeepCopy() *NetworkInterfaceNATDNSRecordObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceNATDNSRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceNATDNSRecordParameters) DeepCopyInto(out *NetworkInterfaceNATDNSRecordParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceNATDNSRecordParameters. +func (in *NetworkInterfaceNATDNSRecordParameters) DeepCopy() *NetworkInterfaceNATDNSRecordParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceNATDNSRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceObservation) DeepCopyInto(out *NetworkInterfaceObservation) { + *out = *in + if in.DNSRecord != nil { + in, out := &in.DNSRecord, &out.DNSRecord + *out = make([]DNSRecordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.IPv6DNSRecord != nil { + in, out := &in.IPv6DNSRecord, &out.IPv6DNSRecord + *out = make([]IPv6DNSRecordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(float64) + **out = **in + } + if in.MacAddress != nil { + in, out := &in.MacAddress, &out.MacAddress + *out = new(string) + **out = **in + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.NATDNSRecord != nil { + in, out := &in.NATDNSRecord, &out.NATDNSRecord + *out = make([]NATDNSRecordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NATIPAddress != nil { + in, out := &in.NATIPAddress, &out.NATIPAddress + *out = new(string) + **out = **in + } + if in.NATIPVersion != nil { + in, out := &in.NATIPVersion, &out.NATIPVersion + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceObservation. +func (in *NetworkInterfaceObservation) DeepCopy() *NetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceParameters) DeepCopyInto(out *NetworkInterfaceParameters) { + *out = *in + if in.DNSRecord != nil { + in, out := &in.DNSRecord, &out.DNSRecord + *out = make([]DNSRecordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.IPv6DNSRecord != nil { + in, out := &in.IPv6DNSRecord, &out.IPv6DNSRecord + *out = make([]IPv6DNSRecordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(float64) + **out = **in + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.NATDNSRecord != nil { + in, out := &in.NATDNSRecord, &out.NATDNSRecord + *out = make([]NATDNSRecordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NATIPAddress != nil { + in, out := &in.NATIPAddress, &out.NATIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceParameters. +func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSettingsInitParameters) DeepCopyInto(out *NetworkSettingsInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsInitParameters. +func (in *NetworkSettingsInitParameters) DeepCopy() *NetworkSettingsInitParameters { + if in == nil { + return nil + } + out := new(NetworkSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSettingsObservation) DeepCopyInto(out *NetworkSettingsObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsObservation. +func (in *NetworkSettingsObservation) DeepCopy() *NetworkSettingsObservation { + if in == nil { + return nil + } + out := new(NetworkSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSettingsParameters) DeepCopyInto(out *NetworkSettingsParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsParameters. +func (in *NetworkSettingsParameters) DeepCopy() *NetworkSettingsParameters { + if in == nil { + return nil + } + out := new(NetworkSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroup) DeepCopyInto(out *PlacementGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroup. +func (in *PlacementGroup) DeepCopy() *PlacementGroup { + if in == nil { + return nil + } + out := new(PlacementGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBinding) DeepCopyInto(out *PlacementGroupIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBinding. +func (in *PlacementGroupIAMBinding) DeepCopy() *PlacementGroupIAMBinding { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroupIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementGroupIAMBindingInitParameters) DeepCopyInto(out *PlacementGroupIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingInitParameters. +func (in *PlacementGroupIAMBindingInitParameters) DeepCopy() *PlacementGroupIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBindingList) DeepCopyInto(out *PlacementGroupIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlacementGroupIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingList. +func (in *PlacementGroupIAMBindingList) DeepCopy() *PlacementGroupIAMBindingList { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroupIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBindingObservation) DeepCopyInto(out *PlacementGroupIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingObservation. +func (in *PlacementGroupIAMBindingObservation) DeepCopy() *PlacementGroupIAMBindingObservation { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementGroupIAMBindingParameters) DeepCopyInto(out *PlacementGroupIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingParameters. +func (in *PlacementGroupIAMBindingParameters) DeepCopy() *PlacementGroupIAMBindingParameters { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBindingSpec) DeepCopyInto(out *PlacementGroupIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingSpec. +func (in *PlacementGroupIAMBindingSpec) DeepCopy() *PlacementGroupIAMBindingSpec { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBindingStatus) DeepCopyInto(out *PlacementGroupIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingStatus. +func (in *PlacementGroupIAMBindingStatus) DeepCopy() *PlacementGroupIAMBindingStatus { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementGroupInitParameters) DeepCopyInto(out *PlacementGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementStrategyPartitions != nil { + in, out := &in.PlacementStrategyPartitions, &out.PlacementStrategyPartitions + *out = new(float64) + **out = **in + } + if in.PlacementStrategySpread != nil { + in, out := &in.PlacementStrategySpread, &out.PlacementStrategySpread + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupInitParameters. +func (in *PlacementGroupInitParameters) DeepCopy() *PlacementGroupInitParameters { + if in == nil { + return nil + } + out := new(PlacementGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupList) DeepCopyInto(out *PlacementGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlacementGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupList. +func (in *PlacementGroupList) DeepCopy() *PlacementGroupList { + if in == nil { + return nil + } + out := new(PlacementGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementGroupObservation) DeepCopyInto(out *PlacementGroupObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementStrategyPartitions != nil { + in, out := &in.PlacementStrategyPartitions, &out.PlacementStrategyPartitions + *out = new(float64) + **out = **in + } + if in.PlacementStrategySpread != nil { + in, out := &in.PlacementStrategySpread, &out.PlacementStrategySpread + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupObservation. +func (in *PlacementGroupObservation) DeepCopy() *PlacementGroupObservation { + if in == nil { + return nil + } + out := new(PlacementGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupParameters) DeepCopyInto(out *PlacementGroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementStrategyPartitions != nil { + in, out := &in.PlacementStrategyPartitions, &out.PlacementStrategyPartitions + *out = new(float64) + **out = **in + } + if in.PlacementStrategySpread != nil { + in, out := &in.PlacementStrategySpread, &out.PlacementStrategySpread + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupParameters. +func (in *PlacementGroupParameters) DeepCopy() *PlacementGroupParameters { + if in == nil { + return nil + } + out := new(PlacementGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementGroupSpec) DeepCopyInto(out *PlacementGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupSpec. +func (in *PlacementGroupSpec) DeepCopy() *PlacementGroupSpec { + if in == nil { + return nil + } + out := new(PlacementGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupStatus) DeepCopyInto(out *PlacementGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupStatus. +func (in *PlacementGroupStatus) DeepCopy() *PlacementGroupStatus { + if in == nil { + return nil + } + out := new(PlacementGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementPolicyInitParameters) DeepCopyInto(out *PlacementPolicyInitParameters) { + *out = *in + if in.HostAffinityRules != nil { + in, out := &in.HostAffinityRules, &out.HostAffinityRules + *out = make([]HostAffinityRulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.PlacementGroupPartition != nil { + in, out := &in.PlacementGroupPartition, &out.PlacementGroupPartition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyInitParameters. +func (in *PlacementPolicyInitParameters) DeepCopy() *PlacementPolicyInitParameters { + if in == nil { + return nil + } + out := new(PlacementPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementPolicyObservation) DeepCopyInto(out *PlacementPolicyObservation) { + *out = *in + if in.HostAffinityRules != nil { + in, out := &in.HostAffinityRules, &out.HostAffinityRules + *out = make([]HostAffinityRulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.PlacementGroupPartition != nil { + in, out := &in.PlacementGroupPartition, &out.PlacementGroupPartition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyObservation. +func (in *PlacementPolicyObservation) DeepCopy() *PlacementPolicyObservation { + if in == nil { + return nil + } + out := new(PlacementPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementPolicyParameters) DeepCopyInto(out *PlacementPolicyParameters) { + *out = *in + if in.HostAffinityRules != nil { + in, out := &in.HostAffinityRules, &out.HostAffinityRules + *out = make([]HostAffinityRulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.PlacementGroupPartition != nil { + in, out := &in.PlacementGroupPartition, &out.PlacementGroupPartition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyParameters. +func (in *PlacementPolicyParameters) DeepCopy() *PlacementPolicyParameters { + if in == nil { + return nil + } + out := new(PlacementPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Gpus != nil { + in, out := &in.Gpus, &out.Gpus + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Gpus != nil { + in, out := &in.Gpus, &out.Gpus + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Gpus != nil { + in, out := &in.Gpus, &out.Gpus + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. 
+func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyInitParameters) DeepCopyInto(out *ScalePolicyInitParameters) { + *out = *in + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = make([]AutoScaleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestAutoScale != nil { + in, out := &in.TestAutoScale, &out.TestAutoScale + *out = make([]TestAutoScaleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyInitParameters. +func (in *ScalePolicyInitParameters) DeepCopy() *ScalePolicyInitParameters { + if in == nil { + return nil + } + out := new(ScalePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyObservation) DeepCopyInto(out *ScalePolicyObservation) { + *out = *in + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = make([]AutoScaleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestAutoScale != nil { + in, out := &in.TestAutoScale, &out.TestAutoScale + *out = make([]TestAutoScaleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyObservation. +func (in *ScalePolicyObservation) DeepCopy() *ScalePolicyObservation { + if in == nil { + return nil + } + out := new(ScalePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyParameters) DeepCopyInto(out *ScalePolicyParameters) { + *out = *in + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = make([]AutoScaleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestAutoScale != nil { + in, out := &in.TestAutoScale, &out.TestAutoScale + *out = make([]TestAutoScaleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyParameters. +func (in *ScalePolicyParameters) DeepCopy() *ScalePolicyParameters { + if in == nil { + return nil + } + out := new(ScalePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulePolicyInitParameters) DeepCopyInto(out *SchedulePolicyInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicyInitParameters. +func (in *SchedulePolicyInitParameters) DeepCopy() *SchedulePolicyInitParameters { + if in == nil { + return nil + } + out := new(SchedulePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulePolicyObservation) DeepCopyInto(out *SchedulePolicyObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicyObservation. +func (in *SchedulePolicyObservation) DeepCopy() *SchedulePolicyObservation { + if in == nil { + return nil + } + out := new(SchedulePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulePolicyParameters) DeepCopyInto(out *SchedulePolicyParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicyParameters. +func (in *SchedulePolicyParameters) DeepCopy() *SchedulePolicyParameters { + if in == nil { + return nil + } + out := new(SchedulePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyInitParameters) DeepCopyInto(out *SchedulingPolicyInitParameters) { + *out = *in + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyInitParameters. +func (in *SchedulingPolicyInitParameters) DeepCopy() *SchedulingPolicyInitParameters { + if in == nil { + return nil + } + out := new(SchedulingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyObservation) DeepCopyInto(out *SchedulingPolicyObservation) { + *out = *in + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyObservation. 
+func (in *SchedulingPolicyObservation) DeepCopy() *SchedulingPolicyObservation { + if in == nil { + return nil + } + out := new(SchedulingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyParameters) DeepCopyInto(out *SchedulingPolicyParameters) { + *out = *in + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyParameters. +func (in *SchedulingPolicyParameters) DeepCopy() *SchedulingPolicyParameters { + if in == nil { + return nil + } + out := new(SchedulingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryDiskInitParameters) DeepCopyInto(out *SecondaryDiskInitParameters) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskInitParameters. +func (in *SecondaryDiskInitParameters) DeepCopy() *SecondaryDiskInitParameters { + if in == nil { + return nil + } + out := new(SecondaryDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryDiskInitializeParamsInitParameters) DeepCopyInto(out *SecondaryDiskInitializeParamsInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskInitializeParamsInitParameters. +func (in *SecondaryDiskInitializeParamsInitParameters) DeepCopy() *SecondaryDiskInitializeParamsInitParameters { + if in == nil { + return nil + } + out := new(SecondaryDiskInitializeParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecondaryDiskInitializeParamsObservation) DeepCopyInto(out *SecondaryDiskInitializeParamsObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskInitializeParamsObservation. +func (in *SecondaryDiskInitializeParamsObservation) DeepCopy() *SecondaryDiskInitializeParamsObservation { + if in == nil { + return nil + } + out := new(SecondaryDiskInitializeParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryDiskInitializeParamsParameters) DeepCopyInto(out *SecondaryDiskInitializeParamsParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskInitializeParamsParameters. +func (in *SecondaryDiskInitializeParamsParameters) DeepCopy() *SecondaryDiskInitializeParamsParameters { + if in == nil { + return nil + } + out := new(SecondaryDiskInitializeParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryDiskObservation) DeepCopyInto(out *SecondaryDiskObservation) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskObservation. +func (in *SecondaryDiskObservation) DeepCopy() *SecondaryDiskObservation { + if in == nil { + return nil + } + out := new(SecondaryDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecondaryDiskParameters) DeepCopyInto(out *SecondaryDiskParameters) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskParameters. +func (in *SecondaryDiskParameters) DeepCopy() *SecondaryDiskParameters { + if in == nil { + return nil + } + out := new(SecondaryDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Snapshot) DeepCopyInto(out *Snapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Snapshot. +func (in *Snapshot) DeepCopy() *Snapshot { + if in == nil { + return nil + } + out := new(Snapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Snapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationGeneration2FeaturesInitParameters) DeepCopyInto(out *SnapshotHardwareGenerationGeneration2FeaturesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationGeneration2FeaturesInitParameters. +func (in *SnapshotHardwareGenerationGeneration2FeaturesInitParameters) DeepCopy() *SnapshotHardwareGenerationGeneration2FeaturesInitParameters { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationGeneration2FeaturesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationGeneration2FeaturesObservation) DeepCopyInto(out *SnapshotHardwareGenerationGeneration2FeaturesObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationGeneration2FeaturesObservation. +func (in *SnapshotHardwareGenerationGeneration2FeaturesObservation) DeepCopy() *SnapshotHardwareGenerationGeneration2FeaturesObservation { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationGeneration2FeaturesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationGeneration2FeaturesParameters) DeepCopyInto(out *SnapshotHardwareGenerationGeneration2FeaturesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationGeneration2FeaturesParameters. 
+func (in *SnapshotHardwareGenerationGeneration2FeaturesParameters) DeepCopy() *SnapshotHardwareGenerationGeneration2FeaturesParameters { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationGeneration2FeaturesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationInitParameters) DeepCopyInto(out *SnapshotHardwareGenerationInitParameters) { + *out = *in + if in.Generation2Features != nil { + in, out := &in.Generation2Features, &out.Generation2Features + *out = make([]SnapshotHardwareGenerationGeneration2FeaturesInitParameters, len(*in)) + copy(*out, *in) + } + if in.LegacyFeatures != nil { + in, out := &in.LegacyFeatures, &out.LegacyFeatures + *out = make([]SnapshotHardwareGenerationLegacyFeaturesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationInitParameters. +func (in *SnapshotHardwareGenerationInitParameters) DeepCopy() *SnapshotHardwareGenerationInitParameters { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationLegacyFeaturesInitParameters) DeepCopyInto(out *SnapshotHardwareGenerationLegacyFeaturesInitParameters) { + *out = *in + if in.PciTopology != nil { + in, out := &in.PciTopology, &out.PciTopology + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationLegacyFeaturesInitParameters. +func (in *SnapshotHardwareGenerationLegacyFeaturesInitParameters) DeepCopy() *SnapshotHardwareGenerationLegacyFeaturesInitParameters { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationLegacyFeaturesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationLegacyFeaturesObservation) DeepCopyInto(out *SnapshotHardwareGenerationLegacyFeaturesObservation) { + *out = *in + if in.PciTopology != nil { + in, out := &in.PciTopology, &out.PciTopology + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationLegacyFeaturesObservation. +func (in *SnapshotHardwareGenerationLegacyFeaturesObservation) DeepCopy() *SnapshotHardwareGenerationLegacyFeaturesObservation { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationLegacyFeaturesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationLegacyFeaturesParameters) DeepCopyInto(out *SnapshotHardwareGenerationLegacyFeaturesParameters) { + *out = *in + if in.PciTopology != nil { + in, out := &in.PciTopology, &out.PciTopology + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationLegacyFeaturesParameters. 
+func (in *SnapshotHardwareGenerationLegacyFeaturesParameters) DeepCopy() *SnapshotHardwareGenerationLegacyFeaturesParameters { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationLegacyFeaturesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationObservation) DeepCopyInto(out *SnapshotHardwareGenerationObservation) { + *out = *in + if in.Generation2Features != nil { + in, out := &in.Generation2Features, &out.Generation2Features + *out = make([]SnapshotHardwareGenerationGeneration2FeaturesParameters, len(*in)) + copy(*out, *in) + } + if in.LegacyFeatures != nil { + in, out := &in.LegacyFeatures, &out.LegacyFeatures + *out = make([]SnapshotHardwareGenerationLegacyFeaturesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationObservation. +func (in *SnapshotHardwareGenerationObservation) DeepCopy() *SnapshotHardwareGenerationObservation { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotHardwareGenerationParameters) DeepCopyInto(out *SnapshotHardwareGenerationParameters) { + *out = *in + if in.Generation2Features != nil { + in, out := &in.Generation2Features, &out.Generation2Features + *out = make([]SnapshotHardwareGenerationGeneration2FeaturesParameters, len(*in)) + copy(*out, *in) + } + if in.LegacyFeatures != nil { + in, out := &in.LegacyFeatures, &out.LegacyFeatures + *out = make([]SnapshotHardwareGenerationLegacyFeaturesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotHardwareGenerationParameters. +func (in *SnapshotHardwareGenerationParameters) DeepCopy() *SnapshotHardwareGenerationParameters { + if in == nil { + return nil + } + out := new(SnapshotHardwareGenerationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBinding) DeepCopyInto(out *SnapshotIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBinding. +func (in *SnapshotIAMBinding) DeepCopy() *SnapshotIAMBinding { + if in == nil { + return nil + } + out := new(SnapshotIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotIAMBindingInitParameters) DeepCopyInto(out *SnapshotIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingInitParameters. +func (in *SnapshotIAMBindingInitParameters) DeepCopy() *SnapshotIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBindingList) DeepCopyInto(out *SnapshotIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingList. +func (in *SnapshotIAMBindingList) DeepCopy() *SnapshotIAMBindingList { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBindingObservation) DeepCopyInto(out *SnapshotIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingObservation. +func (in *SnapshotIAMBindingObservation) DeepCopy() *SnapshotIAMBindingObservation { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotIAMBindingParameters) DeepCopyInto(out *SnapshotIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingParameters. +func (in *SnapshotIAMBindingParameters) DeepCopy() *SnapshotIAMBindingParameters { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBindingSpec) DeepCopyInto(out *SnapshotIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingSpec. +func (in *SnapshotIAMBindingSpec) DeepCopy() *SnapshotIAMBindingSpec { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBindingStatus) DeepCopyInto(out *SnapshotIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingStatus. +func (in *SnapshotIAMBindingStatus) DeepCopy() *SnapshotIAMBindingStatus { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotInitParameters) DeepCopyInto(out *SnapshotInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HardwareGeneration != nil { + in, out := &in.HardwareGeneration, &out.HardwareGeneration + *out = make([]SnapshotHardwareGenerationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceDiskID != nil { + in, out := &in.SourceDiskID, &out.SourceDiskID + *out = new(string) + **out = **in + } + if in.SourceDiskIDRef != nil { + in, out := &in.SourceDiskIDRef, &out.SourceDiskIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceDiskIDSelector != nil { + in, out := &in.SourceDiskIDSelector, &out.SourceDiskIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotInitParameters. +func (in *SnapshotInitParameters) DeepCopy() *SnapshotInitParameters { + if in == nil { + return nil + } + out := new(SnapshotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotList) DeepCopyInto(out *SnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Snapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotList. +func (in *SnapshotList) DeepCopy() *SnapshotList { + if in == nil { + return nil + } + out := new(SnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotObservation) DeepCopyInto(out *SnapshotObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.HardwareGeneration != nil { + in, out := &in.HardwareGeneration, &out.HardwareGeneration + *out = make([]SnapshotHardwareGenerationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceDiskID != nil { + in, out := &in.SourceDiskID, &out.SourceDiskID + *out = new(string) + **out = **in + } + if in.StorageSize != nil { + in, out := &in.StorageSize, &out.StorageSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotObservation. +func (in *SnapshotObservation) DeepCopy() *SnapshotObservation { + if in == nil { + return nil + } + out := new(SnapshotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotParameters) DeepCopyInto(out *SnapshotParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HardwareGeneration != nil { + in, out := &in.HardwareGeneration, &out.HardwareGeneration + *out = make([]SnapshotHardwareGenerationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceDiskID != nil { + in, out := &in.SourceDiskID, &out.SourceDiskID + *out = new(string) + **out = **in + } + if in.SourceDiskIDRef != nil { + in, out := &in.SourceDiskIDRef, &out.SourceDiskIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceDiskIDSelector != nil { + in, out := &in.SourceDiskIDSelector, &out.SourceDiskIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotParameters. +func (in *SnapshotParameters) DeepCopy() *SnapshotParameters { + if in == nil { + return nil + } + out := new(SnapshotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSchedule) DeepCopyInto(out *SnapshotSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSchedule. +func (in *SnapshotSchedule) DeepCopy() *SnapshotSchedule { + if in == nil { + return nil + } + out := new(SnapshotSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBinding) DeepCopyInto(out *SnapshotScheduleIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBinding. 
+func (in *SnapshotScheduleIAMBinding) DeepCopy() *SnapshotScheduleIAMBinding { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotScheduleIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingInitParameters) DeepCopyInto(out *SnapshotScheduleIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotScheduleID != nil { + in, out := &in.SnapshotScheduleID, &out.SnapshotScheduleID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingInitParameters. +func (in *SnapshotScheduleIAMBindingInitParameters) DeepCopy() *SnapshotScheduleIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingList) DeepCopyInto(out *SnapshotScheduleIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotScheduleIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingList. +func (in *SnapshotScheduleIAMBindingList) DeepCopy() *SnapshotScheduleIAMBindingList { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotScheduleIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingObservation) DeepCopyInto(out *SnapshotScheduleIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotScheduleID != nil { + in, out := &in.SnapshotScheduleID, &out.SnapshotScheduleID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingObservation. 
+func (in *SnapshotScheduleIAMBindingObservation) DeepCopy() *SnapshotScheduleIAMBindingObservation { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingParameters) DeepCopyInto(out *SnapshotScheduleIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotScheduleID != nil { + in, out := &in.SnapshotScheduleID, &out.SnapshotScheduleID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingParameters. +func (in *SnapshotScheduleIAMBindingParameters) DeepCopy() *SnapshotScheduleIAMBindingParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingSpec) DeepCopyInto(out *SnapshotScheduleIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingSpec. +func (in *SnapshotScheduleIAMBindingSpec) DeepCopy() *SnapshotScheduleIAMBindingSpec { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingStatus) DeepCopyInto(out *SnapshotScheduleIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingStatus. +func (in *SnapshotScheduleIAMBindingStatus) DeepCopy() *SnapshotScheduleIAMBindingStatus { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotScheduleInitParameters) DeepCopyInto(out *SnapshotScheduleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskIds != nil { + in, out := &in.DiskIds, &out.DiskIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DiskIdsRefs != nil { + in, out := &in.DiskIdsRefs, &out.DiskIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskIdsSelector != nil { + in, out := &in.DiskIdsSelector, &out.DiskIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(string) + **out = **in + } + if in.SchedulePolicy != nil { + in, out := &in.SchedulePolicy, &out.SchedulePolicy + *out = make([]SchedulePolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SnapshotCount != nil { + in, out := &in.SnapshotCount, &out.SnapshotCount + *out = new(float64) + **out = **in + } + if in.SnapshotSpec != nil { + in, out := &in.SnapshotSpec, &out.SnapshotSpec + *out = make([]SnapshotSpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleInitParameters. +func (in *SnapshotScheduleInitParameters) DeepCopy() *SnapshotScheduleInitParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleList) DeepCopyInto(out *SnapshotScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleList. +func (in *SnapshotScheduleList) DeepCopy() *SnapshotScheduleList { + if in == nil { + return nil + } + out := new(SnapshotScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SnapshotScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleObservation) DeepCopyInto(out *SnapshotScheduleObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskIds != nil { + in, out := &in.DiskIds, &out.DiskIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(string) + **out = **in + } + if in.SchedulePolicy != nil { + in, out := &in.SchedulePolicy, &out.SchedulePolicy + *out = make([]SchedulePolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SnapshotCount != nil { + in, out := &in.SnapshotCount, &out.SnapshotCount + *out = new(float64) + **out = **in + } + if in.SnapshotSpec != nil { + in, out := &in.SnapshotSpec, &out.SnapshotSpec + *out = make([]SnapshotSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleObservation. +func (in *SnapshotScheduleObservation) DeepCopy() *SnapshotScheduleObservation { + if in == nil { + return nil + } + out := new(SnapshotScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotScheduleParameters) DeepCopyInto(out *SnapshotScheduleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskIds != nil { + in, out := &in.DiskIds, &out.DiskIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DiskIdsRefs != nil { + in, out := &in.DiskIdsRefs, &out.DiskIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskIdsSelector != nil { + in, out := &in.DiskIdsSelector, &out.DiskIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(string) + **out = **in + } + if in.SchedulePolicy != nil { + in, out := &in.SchedulePolicy, &out.SchedulePolicy + *out = make([]SchedulePolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SnapshotCount != nil { + in, out := &in.SnapshotCount, &out.SnapshotCount + *out = new(float64) + **out = **in + } + if in.SnapshotSpec != nil { + in, out := &in.SnapshotSpec, &out.SnapshotSpec + *out = make([]SnapshotSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleParameters. +func (in *SnapshotScheduleParameters) DeepCopy() *SnapshotScheduleParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleSpec) DeepCopyInto(out *SnapshotScheduleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleSpec. +func (in *SnapshotScheduleSpec) DeepCopy() *SnapshotScheduleSpec { + if in == nil { + return nil + } + out := new(SnapshotScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotScheduleStatus) DeepCopyInto(out *SnapshotScheduleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleStatus. +func (in *SnapshotScheduleStatus) DeepCopy() *SnapshotScheduleStatus { + if in == nil { + return nil + } + out := new(SnapshotScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSpec) DeepCopyInto(out *SnapshotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSpec. +func (in *SnapshotSpec) DeepCopy() *SnapshotSpec { + if in == nil { + return nil + } + out := new(SnapshotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSpecInitParameters) DeepCopyInto(out *SnapshotSpecInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSpecInitParameters. +func (in *SnapshotSpecInitParameters) DeepCopy() *SnapshotSpecInitParameters { + if in == nil { + return nil + } + out := new(SnapshotSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSpecObservation) DeepCopyInto(out *SnapshotSpecObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSpecObservation. +func (in *SnapshotSpecObservation) DeepCopy() *SnapshotSpecObservation { + if in == nil { + return nil + } + out := new(SnapshotSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotSpecParameters) DeepCopyInto(out *SnapshotSpecParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSpecParameters. +func (in *SnapshotSpecParameters) DeepCopy() *SnapshotSpecParameters { + if in == nil { + return nil + } + out := new(SnapshotSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotStatus) DeepCopyInto(out *SnapshotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotStatus. +func (in *SnapshotStatus) DeepCopy() *SnapshotStatus { + if in == nil { + return nil + } + out := new(SnapshotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPOptionsInitParameters) DeepCopyInto(out *TCPOptionsInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPOptionsInitParameters. +func (in *TCPOptionsInitParameters) DeepCopy() *TCPOptionsInitParameters { + if in == nil { + return nil + } + out := new(TCPOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPOptionsObservation) DeepCopyInto(out *TCPOptionsObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPOptionsObservation. +func (in *TCPOptionsObservation) DeepCopy() *TCPOptionsObservation { + if in == nil { + return nil + } + out := new(TCPOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPOptionsParameters) DeepCopyInto(out *TCPOptionsParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPOptionsParameters. +func (in *TCPOptionsParameters) DeepCopy() *TCPOptionsParameters { + if in == nil { + return nil + } + out := new(TCPOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestAutoScaleCustomRuleInitParameters) DeepCopyInto(out *TestAutoScaleCustomRuleInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricType != nil { + in, out := &in.MetricType, &out.MetricType + *out = new(string) + **out = **in + } + if in.RuleType != nil { + in, out := &in.RuleType, &out.RuleType + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestAutoScaleCustomRuleInitParameters. +func (in *TestAutoScaleCustomRuleInitParameters) DeepCopy() *TestAutoScaleCustomRuleInitParameters { + if in == nil { + return nil + } + out := new(TestAutoScaleCustomRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestAutoScaleCustomRuleObservation) DeepCopyInto(out *TestAutoScaleCustomRuleObservation) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricType != nil { + in, out := &in.MetricType, &out.MetricType + *out = new(string) + **out = **in + } + if in.RuleType != nil { + in, out := &in.RuleType, &out.RuleType + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestAutoScaleCustomRuleObservation. +func (in *TestAutoScaleCustomRuleObservation) DeepCopy() *TestAutoScaleCustomRuleObservation { + if in == nil { + return nil + } + out := new(TestAutoScaleCustomRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestAutoScaleCustomRuleParameters) DeepCopyInto(out *TestAutoScaleCustomRuleParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricType != nil { + in, out := &in.MetricType, &out.MetricType + *out = new(string) + **out = **in + } + if in.RuleType != nil { + in, out := &in.RuleType, &out.RuleType + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestAutoScaleCustomRuleParameters. +func (in *TestAutoScaleCustomRuleParameters) DeepCopy() *TestAutoScaleCustomRuleParameters { + if in == nil { + return nil + } + out := new(TestAutoScaleCustomRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestAutoScaleInitParameters) DeepCopyInto(out *TestAutoScaleInitParameters) { + *out = *in + if in.AutoScaleType != nil { + in, out := &in.AutoScaleType, &out.AutoScaleType + *out = new(string) + **out = **in + } + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(float64) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]TestAutoScaleCustomRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitialSize != nil { + in, out := &in.InitialSize, &out.InitialSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(float64) + **out = **in + } + if in.MinZoneSize != nil { + in, out := &in.MinZoneSize, &out.MinZoneSize + *out = new(float64) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(float64) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestAutoScaleInitParameters. +func (in *TestAutoScaleInitParameters) DeepCopy() *TestAutoScaleInitParameters { + if in == nil { + return nil + } + out := new(TestAutoScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestAutoScaleObservation) DeepCopyInto(out *TestAutoScaleObservation) { + *out = *in + if in.AutoScaleType != nil { + in, out := &in.AutoScaleType, &out.AutoScaleType + *out = new(string) + **out = **in + } + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(float64) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]TestAutoScaleCustomRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitialSize != nil { + in, out := &in.InitialSize, &out.InitialSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(float64) + **out = **in + } + if in.MinZoneSize != nil { + in, out := &in.MinZoneSize, &out.MinZoneSize + *out = new(float64) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(float64) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestAutoScaleObservation. +func (in *TestAutoScaleObservation) DeepCopy() *TestAutoScaleObservation { + if in == nil { + return nil + } + out := new(TestAutoScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestAutoScaleParameters) DeepCopyInto(out *TestAutoScaleParameters) { + *out = *in + if in.AutoScaleType != nil { + in, out := &in.AutoScaleType, &out.AutoScaleType + *out = new(string) + **out = **in + } + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(float64) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]TestAutoScaleCustomRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitialSize != nil { + in, out := &in.InitialSize, &out.InitialSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(float64) + **out = **in + } + if in.MinZoneSize != nil { + in, out := &in.MinZoneSize, &out.MinZoneSize + *out = new(float64) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(float64) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestAutoScaleParameters. 
+func (in *TestAutoScaleParameters) DeepCopy() *TestAutoScaleParameters { + if in == nil { + return nil + } + out := new(TestAutoScaleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/compute/v1alpha1/zz_generated.resolvers.go b/apis/compute/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..22d60b9 --- /dev/null +++ b/apis/compute/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,945 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Disk. +func (mg *Disk) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.DiskPlacementPolicy); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DiskPlacementPolicy[i3].DiskPlacementGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DiskPlacementPolicy[i3].DiskPlacementGroupIDRef, + Selector: mg.Spec.ForProvider.DiskPlacementPolicy[i3].DiskPlacementGroupIDSelector, + To: reference.To{ + List: &DiskPlacementGroupList{}, + Managed: &DiskPlacementGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DiskPlacementPolicy[i3].DiskPlacementGroupID") + } + mg.Spec.ForProvider.DiskPlacementPolicy[i3].DiskPlacementGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DiskPlacementPolicy[i3].DiskPlacementGroupIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ImageID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ImageIDRef, + Selector: mg.Spec.ForProvider.ImageIDSelector, + To: reference.To{ + List: &ImageList{}, + Managed: &Image{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ImageID") + } + mg.Spec.ForProvider.ImageID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ImageIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.DiskPlacementPolicy); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DiskPlacementPolicy[i3].DiskPlacementGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DiskPlacementPolicy[i3].DiskPlacementGroupIDRef, + Selector: 
mg.Spec.InitProvider.DiskPlacementPolicy[i3].DiskPlacementGroupIDSelector, + To: reference.To{ + List: &DiskPlacementGroupList{}, + Managed: &DiskPlacementGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DiskPlacementPolicy[i3].DiskPlacementGroupID") + } + mg.Spec.InitProvider.DiskPlacementPolicy[i3].DiskPlacementGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DiskPlacementPolicy[i3].DiskPlacementGroupIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ImageID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ImageIDRef, + Selector: mg.Spec.InitProvider.ImageIDSelector, + To: reference.To{ + List: &ImageList{}, + Managed: &Image{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ImageID") + } + mg.Spec.InitProvider.ImageID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ImageIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DiskPlacementGroup. +func (mg *DiskPlacementGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Filesystem. 
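Every resolver in this file follows the same recipe: read the field's current value, hand the accompanying Ref and Selector to the APIResolver, then write back both the resolved value and the winning reference. Consumers never call these methods by name; they only need the small interface the generated methods satisfy. A sketch of that calling side, ahead of the Filesystem resolver that the comment above introduces (the package name and resolveIfSupported helper are invented; the interfaces come from crossplane-runtime and controller-runtime):

package refsketch

import (
	"context"

	"github.com/crossplane/crossplane-runtime/pkg/resource"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// referencer matches the generated ResolveReferences methods in this file.
type referencer interface {
	ResolveReferences(ctx context.Context, c client.Reader) error
}

// resolveIfSupported resolves references when the managed resource supports
// them and is a no-op otherwise, mirroring how a reconciler can stay generic.
func resolveIfSupported(ctx context.Context, c client.Reader, mg resource.Managed) error {
	if rr, ok := mg.(referencer); ok {
		return rr.ResolveReferences(ctx, c)
	}
	return nil
}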
+func (mg *Filesystem) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this GpuCluster. +func (mg *GpuCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Image. 
+func (mg *Image) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Instance. +func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.BootDisk); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.BootDisk[i3].DiskID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.BootDisk[i3].DiskIDRef, + Selector: mg.Spec.ForProvider.BootDisk[i3].DiskIDSelector, + To: reference.To{ + List: &DiskList{}, + Managed: &Disk{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BootDisk[i3].DiskID") + } + mg.Spec.ForProvider.BootDisk[i3].DiskID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BootDisk[i3].DiskIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.BootDisk); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.BootDisk[i3].InitializeParams); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.BootDisk[i3].InitializeParams[i4].ImageID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.BootDisk[i3].InitializeParams[i4].ImageIDRef, + Selector: mg.Spec.ForProvider.BootDisk[i3].InitializeParams[i4].ImageIDSelector, + To: reference.To{ + List: &ImageList{}, + Managed: &Image{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BootDisk[i3].InitializeParams[i4].ImageID") + } + mg.Spec.ForProvider.BootDisk[i3].InitializeParams[i4].ImageID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BootDisk[i3].InitializeParams[i4].ImageIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + 
return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterface); i3++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.NetworkInterface[i3].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.NetworkInterface[i3].SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.NetworkInterface[i3].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterface[i3].SecurityGroupIds") + } + mg.Spec.ForProvider.NetworkInterface[i3].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.NetworkInterface[i3].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterface); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterface[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkInterface[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkInterface[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterface[i3].SubnetID") + } + mg.Spec.ForProvider.NetworkInterface[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterface[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.BootDisk); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.BootDisk[i3].DiskID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.BootDisk[i3].DiskIDRef, + Selector: mg.Spec.InitProvider.BootDisk[i3].DiskIDSelector, + To: reference.To{ + List: &DiskList{}, + Managed: &Disk{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BootDisk[i3].DiskID") + } + mg.Spec.InitProvider.BootDisk[i3].DiskID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BootDisk[i3].DiskIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.BootDisk); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.BootDisk[i3].InitializeParams); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.BootDisk[i3].InitializeParams[i4].ImageID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.BootDisk[i3].InitializeParams[i4].ImageIDRef, 
+ Selector: mg.Spec.InitProvider.BootDisk[i3].InitializeParams[i4].ImageIDSelector, + To: reference.To{ + List: &ImageList{}, + Managed: &Image{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BootDisk[i3].InitializeParams[i4].ImageID") + } + mg.Spec.InitProvider.BootDisk[i3].InitializeParams[i4].ImageID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BootDisk[i3].InitializeParams[i4].ImageIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterface); i3++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.NetworkInterface[i3].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.NetworkInterface[i3].SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.NetworkInterface[i3].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterface[i3].SecurityGroupIds") + } + mg.Spec.InitProvider.NetworkInterface[i3].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.NetworkInterface[i3].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterface); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterface[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkInterface[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkInterface[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterface[i3].SubnetID") + } + mg.Spec.InitProvider.NetworkInterface[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterface[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this InstanceGroup. 
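Note the split in the Instance resolver above: single-valued fields such as subnetId go through Resolve, while list-valued fields such as securityGroupIds go through ResolveMultiple, which operates on parallel slices of values and references. Both paths rely on crossplane-runtime's pointer helpers to translate between the []*string fields in the spec and the plain strings the resolver works with. A small sketch of that round trip, ahead of the InstanceGroup resolver the comment above introduces (the values are invented):

package refsketch

import "github.com/crossplane/crossplane-runtime/pkg/reference"

func pointerRoundTrip() []*string {
	// Spec fields store optional strings as []*string.
	ids := []*string{reference.ToPtrValue("sg-1"), reference.ToPtrValue("sg-2")}

	// FromPtrValues flattens them into the []string that
	// MultiResolutionRequest.CurrentValues expects.
	vals := reference.FromPtrValues(ids) // []string{"sg-1", "sg-2"}

	// ToPtrValues converts the resolved values back for assignment to the spec.
	return reference.ToPtrValues(vals)
}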
+func (mg *InstanceGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageIDRef, + Selector: mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageIDSelector, + To: reference.To{ + List: &ImageList{}, + Managed: &Image{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageID") + } + mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkIDRef, + Selector: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkID") + } + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsSelector, + To: reference.To{ + List: 
&v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds") + } + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs, + Selector: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds") + } + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs = mrsp.ResolvedReferences + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageIDRef, + Selector: mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageIDSelector, + To: reference.To{ + List: &ImageList{}, + Managed: &Image{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageID") + } + mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceTemplate[i3].BootDisk[i4].InitializeParams[i5].ImageIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkIDRef, + Selector: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkID") + } + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].NetworkIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds") + } + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs, + Selector: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds") + } + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs = mrsp.ResolvedReferences + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this PlacementGroup. +func (mg *PlacementGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Snapshot. 
+func (mg *Snapshot) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceDiskID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SourceDiskIDRef, + Selector: mg.Spec.ForProvider.SourceDiskIDSelector, + To: reference.To{ + List: &DiskList{}, + Managed: &Disk{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceDiskID") + } + mg.Spec.ForProvider.SourceDiskID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceDiskIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceDiskID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SourceDiskIDRef, + Selector: mg.Spec.InitProvider.SourceDiskIDSelector, + To: reference.To{ + List: &DiskList{}, + Managed: &Disk{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceDiskID") + } + mg.Spec.InitProvider.SourceDiskID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceDiskIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SnapshotSchedule. 
+func (mg *SnapshotSchedule) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.DiskIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.DiskIdsRefs, + Selector: mg.Spec.ForProvider.DiskIdsSelector, + To: reference.To{ + List: &DiskList{}, + Managed: &Disk{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DiskIds") + } + mg.Spec.ForProvider.DiskIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.DiskIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.DiskIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.DiskIdsRefs, + Selector: mg.Spec.InitProvider.DiskIdsSelector, + To: reference.To{ + List: &DiskList{}, + Managed: &Disk{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DiskIds") + } + mg.Spec.InitProvider.DiskIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.DiskIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/compute/v1alpha1/zz_gpucluster_terraformed.go b/apis/compute/v1alpha1/zz_gpucluster_terraformed.go new file mode 100755 index 0000000..6b7d8d5 --- /dev/null +++ b/apis/compute/v1alpha1/zz_gpucluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this GpuCluster
+func (mg *GpuCluster) GetTerraformResourceType() string {
+	return "yandex_compute_gpu_cluster"
+}
+
+// GetConnectionDetailsMapping for this GpuCluster
+func (tr *GpuCluster) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this GpuCluster
+func (tr *GpuCluster) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this GpuCluster
+func (tr *GpuCluster) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this GpuCluster
+func (tr *GpuCluster) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this GpuCluster
+func (tr *GpuCluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this GpuCluster
+func (tr *GpuCluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this GpuCluster
+func (tr *GpuCluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this GpuCluster
+func (tr *GpuCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
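GetMergedParameters leans on mergo for its merge semantics: WithSliceDeepCopy deep-copies slices from initProvider, and resetting Overwrite to false keeps every key that forProvider already sets authoritative. A toy sketch of that behavior with invented values (the package name and function are hypothetical):

package refsketch

import "dario.cat/mergo"

func mergeSketch() (map[string]any, error) {
	params := map[string]any{"name": "from-for-provider"}                     // as from GetParameters
	initParams := map[string]any{"name": "ignored", "zone": "ru-central1-a"} // as from GetInitParameters

	// Same options as the generated code: deep-copy slices, but never
	// overwrite a key that params already has.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})

	// params is now {"name": "from-for-provider", "zone": "ru-central1-a"}.
	return params, err
}

+
+// LateInitialize this GpuCluster using its observed tfState.
+// returns true if there are any spec changes for the resource.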
+func (tr *GpuCluster) LateInitialize(attrs []byte) (bool, error) { + params := &GpuClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GpuCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_gpucluster_types.go b/apis/compute/v1alpha1/zz_gpucluster_types.go new file mode 100755 index 0000000..91b1a15 --- /dev/null +++ b/apis/compute/v1alpha1/zz_gpucluster_types.go @@ -0,0 +1,169 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GpuClusterInitParameters struct { + + // Description of the GPU cluster. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the GPU cluster belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Type of interconnect between nodes to use in GPU cluster. Type infiniband is set by default, and it is the only one available at the moment. + InterconnectType *string `json:"interconnectType,omitempty" tf:"interconnect_type,omitempty"` + + // Labels to assign to this GPU cluster. A list of key/value pairs. For details about the concept, see documentation. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the GPU cluster. Provide this property when you create a resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Availability zone where the GPU cluster will reside. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type GpuClusterObservation struct { + + // Creation timestamp of the GPU cluster. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the GPU cluster. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the GPU cluster belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of interconnect between nodes to use in GPU cluster. Type infiniband is set by default, and it is the only one available at the moment. 
+	InterconnectType *string `json:"interconnectType,omitempty" tf:"interconnect_type,omitempty"`
+
+	// Labels to assign to this GPU cluster. A list of key/value pairs. For details about the concept, see documentation.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the GPU cluster. Provide this property when you create a resource.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The status of the GPU cluster.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+	// Availability zone where the GPU cluster will reside.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type GpuClusterParameters struct {
+
+	// Description of the GPU cluster. Provide this property when you create a resource.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The ID of the folder that the GPU cluster belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Type of interconnect between nodes to use in GPU cluster. Type infiniband is set by default, and it is the only one available at the moment.
+	// +kubebuilder:validation:Optional
+	InterconnectType *string `json:"interconnectType,omitempty" tf:"interconnect_type,omitempty"`
+
+	// Labels to assign to this GPU cluster. A list of key/value pairs. For details about the concept, see documentation.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the GPU cluster. Provide this property when you create a resource.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Availability zone where the GPU cluster will reside.
+	// +kubebuilder:validation:Optional
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+// GpuClusterSpec defines the desired state of GpuCluster
+type GpuClusterSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     GpuClusterParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not want to update them after creation,
+	// for example because an external controller, like an autoscaler, is
+	// managing them.
+	InitProvider GpuClusterInitParameters `json:"initProvider,omitempty"`
+}
+
+// GpuClusterStatus defines the observed state of GpuCluster.
+type GpuClusterStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        GpuClusterObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// GpuCluster is the Schema for the GpuClusters API. GPU Cluster connects multiple Compute GPU Instances in the same availability zone with a high-speed, low-latency network.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type GpuCluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec              GpuClusterSpec   `json:"spec"`
+	Status            GpuClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// GpuClusterList contains a list of GpuClusters
+type GpuClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []GpuCluster `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	GpuCluster_Kind             = "GpuCluster"
+	GpuCluster_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: GpuCluster_Kind}.String()
+	GpuCluster_KindAPIVersion   = GpuCluster_Kind + "." + CRDGroupVersion.String()
+	GpuCluster_GroupVersionKind = CRDGroupVersion.WithKind(GpuCluster_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&GpuCluster{}, &GpuClusterList{})
+}
diff --git a/apis/compute/v1alpha1/zz_gpuclusteriambinding_terraformed.go b/apis/compute/v1alpha1/zz_gpuclusteriambinding_terraformed.go
new file mode 100755
index 0000000..939fd80
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_gpuclusteriambinding_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this GpuClusterIAMBinding
+func (mg *GpuClusterIAMBinding) GetTerraformResourceType() string {
+	return "yandex_compute_gpu_cluster_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this GpuClusterIAMBinding
+func (tr *GpuClusterIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
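LateInitialize works in the opposite direction from the parameter getters: observed Terraform state fills in spec.forProvider fields the user left unset, and the boolean result tells the reconciler whether the spec actually changed. A usage sketch placed in this package, with an invented state payload (the field name follows the tf tags on GpuClusterIAMBindingParameters; lateInitSketch is hypothetical):

// lateInitSketch is a hypothetical caller: if spec.forProvider.role is nil,
// LateInitialize copies "viewer" from the observed state and reports true;
// a field the user already set is left untouched.
func lateInitSketch(tr *GpuClusterIAMBinding) (bool, error) {
	attrs := []byte(`{"role":"viewer"}`)
	return tr.LateInitialize(attrs)
}

+
+// LateInitialize this GpuClusterIAMBinding using its observed tfState.
+// returns true if there are any spec changes for the resource.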
+func (tr *GpuClusterIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &GpuClusterIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GpuClusterIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_gpuclusteriambinding_types.go b/apis/compute/v1alpha1/zz_gpuclusteriambinding_types.go new file mode 100755 index 0000000..50ba9aa --- /dev/null +++ b/apis/compute/v1alpha1/zz_gpuclusteriambinding_types.go @@ -0,0 +1,118 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GpuClusterIAMBindingInitParameters struct { + + // ID of the gpu cluster to attach the policy to. + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_gpu_cluster_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type GpuClusterIAMBindingObservation struct { + + // ID of the gpu cluster to attach the policy to. + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_gpu_cluster_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type GpuClusterIAMBindingParameters struct { + + // ID of the gpu cluster to attach the policy to. + // +kubebuilder:validation:Optional + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_gpu_cluster_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// GpuClusterIAMBindingSpec defines the desired state of GpuClusterIAMBinding +type GpuClusterIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GpuClusterIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GpuClusterIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// GpuClusterIAMBindingStatus defines the observed state of GpuClusterIAMBinding. +type GpuClusterIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GpuClusterIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// GpuClusterIAMBinding is the Schema for the GpuClusterIAMBindings API. Allows management of a single IAM binding for a GPU Cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type GpuClusterIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gpuClusterId) || (has(self.initProvider) && has(self.initProvider.gpuClusterId))",message="spec.forProvider.gpuClusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec GpuClusterIAMBindingSpec `json:"spec"` + Status GpuClusterIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GpuClusterIAMBindingList contains a list of GpuClusterIAMBindings +type GpuClusterIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GpuClusterIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + GpuClusterIAMBinding_Kind = "GpuClusterIAMBinding" + GpuClusterIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GpuClusterIAMBinding_Kind}.String() + GpuClusterIAMBinding_KindAPIVersion = GpuClusterIAMBinding_Kind + "." 
+ CRDGroupVersion.String()
+	GpuClusterIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(GpuClusterIAMBinding_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&GpuClusterIAMBinding{}, &GpuClusterIAMBindingList{})
+}
diff --git a/apis/compute/v1alpha1/zz_groupversion_info.go b/apis/compute/v1alpha1/zz_groupversion_info.go
new file mode 100755
index 0000000..0f96047
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_groupversion_info.go
@@ -0,0 +1,28 @@
+// Code generated by upjet. DO NOT EDIT.
+
+// +kubebuilder:object:generate=true
+// +groupName=compute.yandex-cloud.upjet.crossplane.io
+// +versionName=v1alpha1
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata.
+const (
+	CRDGroup   = "compute.yandex-cloud.upjet.crossplane.io"
+	CRDVersion = "v1alpha1"
+)
+
+var (
+	// CRDGroupVersion is the API Group Version used to register the objects
+	CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/compute/v1alpha1/zz_image_terraformed.go b/apis/compute/v1alpha1/zz_image_terraformed.go
new file mode 100755
index 0000000..7e9408a
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_image_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Image
+func (mg *Image) GetTerraformResourceType() string {
+	return "yandex_compute_image"
+}
+
+// GetConnectionDetailsMapping for this Image
+func (tr *Image) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Image
+func (tr *Image) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Image
+func (tr *Image) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Image
+func (tr *Image) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Image
+func (tr *Image) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Image
+func (tr *Image) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Image
+func (tr *Image) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Image
+func (tr *Image) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Image using its observed tfState.
+// Returns true if there are any spec changes for the resource.
+func (tr *Image) LateInitialize(attrs []byte) (bool, error) {
+	params := &ImageParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Image) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/compute/v1alpha1/zz_image_types.go b/apis/compute/v1alpha1/zz_image_types.go
new file mode 100755
index 0000000..aeb5cab
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_image_types.go
@@ -0,0 +1,306 @@
+// Code generated by upjet. DO NOT EDIT.
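A quick aside on the GetMergedParameters implementations in these files: mergo.WithSliceDeepCopy implicitly turns on Overwrite, which the generated code then resets so that explicit spec.forProvider values always win over spec.initProvider defaults. A self-contained sketch of those semantics (key names and values are illustrative only):

    package main

    import (
        "fmt"

        "dario.cat/mergo"
    )

    func main() {
        forProvider := map[string]any{"name": "from-spec"}
        initProvider := map[string]any{"name": "from-init", "os_type": "LINUX"}

        // Same option set as the generated code: deep-copy slices, but keep
        // existing forProvider values instead of overwriting them.
        if err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
            c.Overwrite = false
        }); err != nil {
            panic(err)
        }

        fmt.Println(forProvider) // map[name:from-spec os_type:LINUX]
    }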
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HardwareGenerationGeneration2FeaturesInitParameters struct { +} + +type HardwareGenerationGeneration2FeaturesObservation struct { +} + +type HardwareGenerationGeneration2FeaturesParameters struct { +} + +type HardwareGenerationLegacyFeaturesInitParameters struct { + PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"` +} + +type HardwareGenerationLegacyFeaturesObservation struct { + PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"` +} + +type HardwareGenerationLegacyFeaturesParameters struct { + + // +kubebuilder:validation:Optional + PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"` +} + +type ImageHardwareGenerationInitParameters struct { + Generation2Features []HardwareGenerationGeneration2FeaturesInitParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"` + + LegacyFeatures []HardwareGenerationLegacyFeaturesInitParameters `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"` +} + +type ImageHardwareGenerationObservation struct { + Generation2Features []HardwareGenerationGeneration2FeaturesParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"` + + LegacyFeatures []HardwareGenerationLegacyFeaturesObservation `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"` +} + +type ImageHardwareGenerationParameters struct { + + // +kubebuilder:validation:Optional + Generation2Features []HardwareGenerationGeneration2FeaturesParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"` + + // +kubebuilder:validation:Optional + LegacyFeatures []HardwareGenerationLegacyFeaturesParameters `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"` +} + +type ImageInitParameters struct { + + // An optional description of the image. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the image family to which this image belongs. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + HardwareGeneration []ImageHardwareGenerationInitParameters `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"` + + // A set of key/value label pairs to assign to the image. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Minimum size in GB of the disk that will be created from this image. + MinDiskSize *float64 `json:"minDiskSize,omitempty" tf:"min_disk_size,omitempty"` + + // Name of the disk. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating system type that is contained in the image. Possible values: "LINUX", "WINDOWS". + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Optimize the image to create a disk. + Pooled *bool `json:"pooled,omitempty" tf:"pooled,omitempty"` + + // License IDs that indicate which licenses are attached to this image. + // +listType=set + ProductIds []*string `json:"productIds,omitempty" tf:"product_ids,omitempty"` + + // The ID of a disk to use as the source of the image. Changing this ID forces a new resource to be created. + SourceDisk *string `json:"sourceDisk,omitempty" tf:"source_disk,omitempty"` + + // The name of the family to use as the source of the new image. The ID of the latest image is taken from the "standard-images" folder. Changing the family forces a new resource to be created. + SourceFamily *string `json:"sourceFamily,omitempty" tf:"source_family,omitempty"` + + // The ID of an existing image to use as the source of the image. Changing this ID forces a new resource to be created. + SourceImage *string `json:"sourceImage,omitempty" tf:"source_image,omitempty"` + + // The ID of a snapshot to use as the source of the image. Changing this ID forces a new resource to be created. + SourceSnapshot *string `json:"sourceSnapshot,omitempty" tf:"source_snapshot,omitempty"` + + // The URL to use as the source of the image. Changing this URL forces a new resource to be created. + SourceURL *string `json:"sourceUrl,omitempty" tf:"source_url,omitempty"` +} + +type ImageObservation struct { + + // Creation timestamp of the image. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // An optional description of the image. Provide this property when you create a resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the image family to which this image belongs. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + HardwareGeneration []ImageHardwareGenerationObservation `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the image. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Minimum size in GB of the disk that will be created from this image. + MinDiskSize *float64 `json:"minDiskSize,omitempty" tf:"min_disk_size,omitempty"` + + // Name of the disk. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating system type that is contained in the image. Possible values: "LINUX", "WINDOWS". + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Optimize the image to create a disk. + Pooled *bool `json:"pooled,omitempty" tf:"pooled,omitempty"` + + // License IDs that indicate which licenses are attached to this image. + // +listType=set + ProductIds []*string `json:"productIds,omitempty" tf:"product_ids,omitempty"` + + // The size of the image, specified in GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of a disk to use as the source of the image. Changing this ID forces a new resource to be created. 
+ SourceDisk *string `json:"sourceDisk,omitempty" tf:"source_disk,omitempty"` + + // The name of the family to use as the source of the new image. The ID of the latest image is taken from the "standard-images" folder. Changing the family forces a new resource to be created. + SourceFamily *string `json:"sourceFamily,omitempty" tf:"source_family,omitempty"` + + // The ID of an existing image to use as the source of the image. Changing this ID forces a new resource to be created. + SourceImage *string `json:"sourceImage,omitempty" tf:"source_image,omitempty"` + + // The ID of a snapshot to use as the source of the image. Changing this ID forces a new resource to be created. + SourceSnapshot *string `json:"sourceSnapshot,omitempty" tf:"source_snapshot,omitempty"` + + // The URL to use as the source of the image. Changing this URL forces a new resource to be created. + SourceURL *string `json:"sourceUrl,omitempty" tf:"source_url,omitempty"` + + // The status of the image. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ImageParameters struct { + + // An optional description of the image. Provide this property when you create a resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the image family to which this image belongs. + // +kubebuilder:validation:Optional + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + HardwareGeneration []ImageHardwareGenerationParameters `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"` + + // A set of key/value label pairs to assign to the image. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Minimum size in GB of the disk that will be created from this image. + // +kubebuilder:validation:Optional + MinDiskSize *float64 `json:"minDiskSize,omitempty" tf:"min_disk_size,omitempty"` + + // Name of the disk. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating system type that is contained in the image. Possible values: "LINUX", "WINDOWS". + // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Optimize the image to create a disk. + // +kubebuilder:validation:Optional + Pooled *bool `json:"pooled,omitempty" tf:"pooled,omitempty"` + + // License IDs that indicate which licenses are attached to this image. + // +kubebuilder:validation:Optional + // +listType=set + ProductIds []*string `json:"productIds,omitempty" tf:"product_ids,omitempty"` + + // The ID of a disk to use as the source of the image. Changing this ID forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SourceDisk *string `json:"sourceDisk,omitempty" tf:"source_disk,omitempty"` + + // The name of the family to use as the source of the new image. The ID of the latest image is taken from the "standard-images" folder. Changing the family forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceFamily *string `json:"sourceFamily,omitempty" tf:"source_family,omitempty"` + + // The ID of an existing image to use as the source of the image. Changing this ID forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceImage *string `json:"sourceImage,omitempty" tf:"source_image,omitempty"` + + // The ID of a snapshot to use as the source of the image. Changing this ID forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceSnapshot *string `json:"sourceSnapshot,omitempty" tf:"source_snapshot,omitempty"` + + // The URL to use as the source of the image. Changing this URL forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceURL *string `json:"sourceUrl,omitempty" tf:"source_url,omitempty"` +} + +// ImageSpec defines the desired state of Image +type ImageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageInitParameters `json:"initProvider,omitempty"` +} + +// ImageStatus defines the observed state of Image. +type ImageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Image is the Schema for the Images API. Creates a VM image for the Yandex Compute service from an existing tarball. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ImageSpec `json:"spec"` + Status ImageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageList contains a list of Images +type ImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Image `json:"items"` +} + +// Repository type metadata. 
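The registration boilerplate below, together with the SchemeBuilder from zz_groupversion_info.go, is what makes these types consumable from a controller-runtime client. Roughly (a sketch, assuming the module path from the +crossplane:generate markers in this diff; error handling abbreviated):

    package main

    import (
        "context"
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"

        computev1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
    )

    func main() {
        scheme := runtime.NewScheme()
        _ = computev1alpha1.AddToScheme(scheme)

        cfg, err := ctrl.GetConfig()
        if err != nil {
            panic(err)
        }
        c, err := client.New(cfg, client.Options{Scheme: scheme})
        if err != nil {
            panic(err)
        }

        // Image is cluster-scoped (+kubebuilder:resource:scope=Cluster),
        // so the List call needs no namespace.
        var images computev1alpha1.ImageList
        if err := c.List(context.Background(), &images); err != nil {
            panic(err)
        }
        fmt.Printf("found %d images\n", len(images.Items))
    }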
+var (
+	Image_Kind             = "Image"
+	Image_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Image_Kind}.String()
+	Image_KindAPIVersion   = Image_Kind + "." + CRDGroupVersion.String()
+	Image_GroupVersionKind = CRDGroupVersion.WithKind(Image_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Image{}, &ImageList{})
+}
diff --git a/apis/compute/v1alpha1/zz_imageiambinding_terraformed.go b/apis/compute/v1alpha1/zz_imageiambinding_terraformed.go
new file mode 100755
index 0000000..e13b95e
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_imageiambinding_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this ImageIAMBinding
+func (mg *ImageIAMBinding) GetTerraformResourceType() string {
+	return "yandex_compute_image_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this ImageIAMBinding
+func (tr *ImageIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this ImageIAMBinding
+func (tr *ImageIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this ImageIAMBinding
+func (tr *ImageIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this ImageIAMBinding
+func (tr *ImageIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this ImageIAMBinding
+func (tr *ImageIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ImageIAMBinding
+func (tr *ImageIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ImageIAMBinding
+func (tr *ImageIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ImageIAMBinding
+func (tr *ImageIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ImageIAMBinding using its observed tfState.
+// Returns true if there are any spec changes for the resource.
+func (tr *ImageIAMBinding) LateInitialize(attrs []byte) (bool, error) {
+	params := &ImageIAMBindingParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *ImageIAMBinding) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/compute/v1alpha1/zz_imageiambinding_types.go b/apis/compute/v1alpha1/zz_imageiambinding_types.go
new file mode 100755
index 0000000..61f92d6
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_imageiambinding_types.go
@@ -0,0 +1,117 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type ImageIAMBindingInitParameters struct {
+
+	// ID of the image to attach the policy to.
+	ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_image_iam_binding can be used per role.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+}
+
+type ImageIAMBindingObservation struct {
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// ID of the image to attach the policy to.
+	ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_image_iam_binding can be used per role.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+}
+
+type ImageIAMBindingParameters struct {
+
+	// ID of the image to attach the policy to.
+	// +kubebuilder:validation:Optional
+	ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_image_iam_binding can be used per role.
+ // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// ImageIAMBindingSpec defines the desired state of ImageIAMBinding +type ImageIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// ImageIAMBindingStatus defines the observed state of ImageIAMBinding. +type ImageIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ImageIAMBinding is the Schema for the ImageIAMBindings API. Allows management of a single IAM binding for an image. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ImageIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.imageId) || (has(self.initProvider) && has(self.initProvider.imageId))",message="spec.forProvider.imageId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ImageIAMBindingSpec `json:"spec"` + Status ImageIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageIAMBindingList contains a list of ImageIAMBindings +type ImageIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ImageIAMBinding `json:"items"` +} + +// Repository type metadata. 
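One non-obvious property of the accessor methods in these terraformed files: parameters round-trip through upjet's tf-tagged JSON parser, so the resulting maps appear to be keyed by Terraform attribute names (image_id), not by the JSON field names (imageId). A hedged sketch, assuming the tf tags shown above behave this way (the ID and member values are placeholders, not values from this diff):

    package main

    import (
        "fmt"

        computev1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
    )

    func main() {
        tr := &computev1alpha1.ImageIAMBinding{}
        // Keys follow the tf tags, matching what the Terraform layer expects.
        _ = tr.SetParameters(map[string]any{
            "image_id": "example-image-id",
            "role":     "compute.images.user",
            "members":  []any{"userAccount:example-user"},
        })
        params, _ := tr.GetParameters()
        fmt.Println(params["image_id"]) // example-image-id
    }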
+var (
+	ImageIAMBinding_Kind             = "ImageIAMBinding"
+	ImageIAMBinding_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: ImageIAMBinding_Kind}.String()
+	ImageIAMBinding_KindAPIVersion   = ImageIAMBinding_Kind + "." + CRDGroupVersion.String()
+	ImageIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(ImageIAMBinding_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&ImageIAMBinding{}, &ImageIAMBindingList{})
+}
diff --git a/apis/compute/v1alpha1/zz_instance_terraformed.go b/apis/compute/v1alpha1/zz_instance_terraformed.go
new file mode 100755
index 0000000..c19468a
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_instance_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Instance
+func (mg *Instance) GetTerraformResourceType() string {
+	return "yandex_compute_instance"
+}
+
+// GetConnectionDetailsMapping for this Instance
+func (tr *Instance) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Instance
+func (tr *Instance) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Instance
+func (tr *Instance) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Instance
+func (tr *Instance) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Instance
+func (tr *Instance) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Instance
+func (tr *Instance) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Instance
+func (tr *Instance) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Instance
+func (tr *Instance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Instance using its observed tfState.
+// Returns true if there are any spec changes for the resource.
+func (tr *Instance) LateInitialize(attrs []byte) (bool, error) {
+	params := &InstanceParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Instance) GetTerraformSchemaVersion() int {
+	return 1
+}
diff --git a/apis/compute/v1alpha1/zz_instance_types.go b/apis/compute/v1alpha1/zz_instance_types.go
new file mode 100755
index 0000000..5862968
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_instance_types.go
@@ -0,0 +1,1187 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type BootDiskInitParameters struct {
+
+	// Defines whether the disk will be auto-deleted when the instance is deleted. The default value is True.
+	AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"`
+
+	// Name that can be used to access an attached disk.
+	DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+	// The ID of the existing disk (such as those managed by yandex_compute_disk) to attach as a boot disk.
+	// +crossplane:generate:reference:type=Disk
+	DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+	// Reference to a Disk to populate diskId.
+	// +kubebuilder:validation:Optional
+	DiskIDRef *v1.Reference `json:"diskIdRef,omitempty" tf:"-"`
+
+	// Selector for a Disk to populate diskId.
+	// +kubebuilder:validation:Optional
+	DiskIDSelector *v1.Selector `json:"diskIdSelector,omitempty" tf:"-"`
+
+	// Parameters for a new disk that will be created alongside the new instance. Either initialize_params or disk_id must be set. The structure is documented below.
+	InitializeParams []InitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+	// Type of access to the disk resource. By default, a disk is attached in READ_WRITE mode.
+	Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type BootDiskObservation struct {
+
+	// Defines whether the disk will be auto-deleted when the instance is deleted. The default value is True.
+	AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"`
+
+	// Name that can be used to access an attached disk.
+	DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+	// The ID of the existing disk (such as those managed by yandex_compute_disk) to attach as a boot disk.
+	DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+	// Parameters for a new disk that will be created alongside the new instance. Either initialize_params or disk_id must be set.
The structure is documented below. + InitializeParams []InitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + + // Type of access to the disk resource. By default, a disk is attached in READ_WRITE mode. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type BootDiskParameters struct { + + // Defines whether the disk will be auto-deleted when the instance is deleted. The default value is True. + // +kubebuilder:validation:Optional + AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` + + // Name that can be used to access an attached disk. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // The ID of the existing disk (such as those managed by yandex_compute_disk) to attach as a boot disk. + // +crossplane:generate:reference:type=Disk + // +kubebuilder:validation:Optional + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + // Reference to a Disk to populate diskId. + // +kubebuilder:validation:Optional + DiskIDRef *v1.Reference `json:"diskIdRef,omitempty" tf:"-"` + + // Selector for a Disk to populate diskId. + // +kubebuilder:validation:Optional + DiskIDSelector *v1.Selector `json:"diskIdSelector,omitempty" tf:"-"` + + // Parameters for a new disk that will be created alongside the new instance. Either initialize_params or disk_id must be set. The structure is documented below. + // +kubebuilder:validation:Optional + InitializeParams []InitializeParamsParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + + // Type of access to the disk resource. By default, a disk is attached in READ_WRITE mode. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type DNSRecordInitParameters struct { + + // DNS zone ID (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN (must have a dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. in seconds + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type DNSRecordObservation struct { + + // DNS zone ID (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN (must have a dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. in seconds + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type DNSRecordParameters struct { + + // DNS zone ID (if not set, private zone used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN (must have a dot at the end). + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. in seconds + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type HostAffinityRulesInitParameters struct { + + // Affinity label or one of reserved values - yc.hostId, yc.hostGroupId. 
+ Key *string `json:"key,omitempty" tf:"key"` + + // Affinity action. The only value supported is IN. + Op *string `json:"op,omitempty" tf:"op"` + + Values []*string `json:"values,omitempty" tf:"values"` +} + +type HostAffinityRulesObservation struct { + + // Affinity label or one of reserved values - yc.hostId, yc.hostGroupId. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Affinity action. The only value supported is IN. + Op *string `json:"op,omitempty" tf:"op,omitempty"` + + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HostAffinityRulesParameters struct { + + // Affinity label or one of reserved values - yc.hostId, yc.hostGroupId. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key"` + + // Affinity action. The only value supported is IN. + // +kubebuilder:validation:Optional + Op *string `json:"op,omitempty" tf:"op"` + + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values"` +} + +type IPv6DNSRecordInitParameters struct { + + // DNS zone ID (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN (must have a dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. in seconds + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv6DNSRecordObservation struct { + + // DNS zone ID (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN (must have a dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. in seconds + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv6DNSRecordParameters struct { + + // DNS zone ID (if not set, private zone used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN (must have a dot at the end). + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. in seconds + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type InitializeParamsInitParameters struct { + + // Block size of the disk, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the boot disk. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A disk image to initialize this disk from. + // +crossplane:generate:reference:type=Image + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Reference to a Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDRef *v1.Reference `json:"imageIdRef,omitempty" tf:"-"` + + // Selector for a Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDSelector *v1.Selector `json:"imageIdSelector,omitempty" tf:"-"` + + // Resource name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the disk in GB. 
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // A snapshot to initialize this disk from. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Disk type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type InitializeParamsObservation struct { + + // Block size of the disk, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the boot disk. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A disk image to initialize this disk from. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Resource name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the disk in GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // A snapshot to initialize this disk from. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Disk type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type InitializeParamsParameters struct { + + // Block size of the disk, specified in bytes. + // +kubebuilder:validation:Optional + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + + // Description of the boot disk. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A disk image to initialize this disk from. + // +crossplane:generate:reference:type=Image + // +kubebuilder:validation:Optional + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Reference to a Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDRef *v1.Reference `json:"imageIdRef,omitempty" tf:"-"` + + // Selector for a Image to populate imageId. + // +kubebuilder:validation:Optional + ImageIDSelector *v1.Selector `json:"imageIdSelector,omitempty" tf:"-"` + + // Resource name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Size of the disk in GB. + // +kubebuilder:validation:Optional + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // A snapshot to initialize this disk from. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Disk type. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type InstanceFilesystemInitParameters struct { + + // Name of the device representing the filesystem on the instance. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // ID of the filesystem that should be attached. + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Mode of access to the filesystem that should be attached. By default, filesystem is attached in READ_WRITE mode. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type InstanceFilesystemObservation struct { + + // Name of the device representing the filesystem on the instance. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // ID of the filesystem that should be attached. + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Mode of access to the filesystem that should be attached. By default, filesystem is attached in READ_WRITE mode. 
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type InstanceFilesystemParameters struct {
+
+ // Name of the device representing the filesystem on the instance.
+ // +kubebuilder:validation:Optional
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the filesystem that should be attached.
+ // +kubebuilder:validation:Optional
+ FilesystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"`
+
+ // Mode of access to the filesystem that should be attached. By default, the filesystem is attached in READ_WRITE mode.
+ // +kubebuilder:validation:Optional
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type InstanceHardwareGenerationGeneration2FeaturesInitParameters struct {
+}
+
+type InstanceHardwareGenerationGeneration2FeaturesObservation struct {
+}
+
+type InstanceHardwareGenerationGeneration2FeaturesParameters struct {
+}
+
+type InstanceHardwareGenerationInitParameters struct {
+}
+
+type InstanceHardwareGenerationLegacyFeaturesInitParameters struct {
+}
+
+type InstanceHardwareGenerationLegacyFeaturesObservation struct {
+ PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"`
+}
+
+type InstanceHardwareGenerationLegacyFeaturesParameters struct {
+}
+
+type InstanceHardwareGenerationObservation struct {
+ Generation2Features []InstanceHardwareGenerationGeneration2FeaturesObservation `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"`
+
+ LegacyFeatures []InstanceHardwareGenerationLegacyFeaturesObservation `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"`
+}
+
+type InstanceHardwareGenerationParameters struct {
+}
+
+type InstanceInitParameters struct {
+
+ // If you try to update a property that requires recreating the instance without setting this field, the update will fail.
+ AllowRecreate *bool `json:"allowRecreate,omitempty" tf:"allow_recreate,omitempty"`
+
+ // If you try to update a property that requires stopping the instance without setting this field, the update will fail.
+ AllowStoppingForUpdate *bool `json:"allowStoppingForUpdate,omitempty" tf:"allow_stopping_for_update,omitempty"`
+
+ // The boot disk for the instance. The structure is documented below.
+ BootDisk []BootDiskInitParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+ // Description of the instance.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of filesystems that are attached to the instance. Structure is documented below.
+ Filesystem []InstanceFilesystemInitParameters `json:"filesystem,omitempty" tf:"filesystem,omitempty"`
+
+ // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+ // ID of the GPU cluster to attach this instance to. The GPU cluster must exist in the same zone as the instance.
+ GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"`
+
+ // Host name for the instance. This field is used to generate the instance fqdn value. The host name must be unique within the network and region. If not specified, the host name will be equal to the id of the instance and the fqdn will be <id>.auto.internal. Otherwise the FQDN will be <hostname>.<region_id>.internal.
+ Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // List of local disks that are attached to the instance. Structure is documented below.
+ LocalDisk []LocalDiskInitParameters `json:"localDisk,omitempty" tf:"local_disk,omitempty"`
+
+ // Time between notification via metadata service and maintenance. E.g., 60s.
+ MaintenanceGracePeriod *string `json:"maintenanceGracePeriod,omitempty" tf:"maintenance_grace_period,omitempty"`
+
+ // Behaviour on maintenance events. The default is unspecified. Values: unspecified, migrate, restart.
+ MaintenancePolicy *string `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+ // Metadata key/value pairs to make available from within the instance.
+ // +mapType=granular
+ Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+ // Options allowing the user to configure access to the instance's metadata.
+ MetadataOptions []MetadataOptionsInitParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"`
+
+ // Resource name.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // Type of network acceleration. The default is standard. Values: standard, software_accelerated
+ NetworkAccelerationType *string `json:"networkAccelerationType,omitempty" tf:"network_acceleration_type,omitempty"`
+
+ // Networks to attach to the instance. This can be specified multiple times. The structure is documented below.
+ NetworkInterface []NetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+ // The placement policy configuration. The structure is documented below.
+ PlacementPolicy []PlacementPolicyInitParameters `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+ // The type of virtual machine to create. The default is 'standard-v1'.
+ PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+ // Compute resources that are allocated for the instance. The structure is documented below.
+ Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+ // Scheduling policy configuration. The structure is documented below.
+ SchedulingPolicy []SchedulingPolicyInitParameters `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+
+ // A set of disks to attach to the instance. The structure is documented below. Note: The allow_stopping_for_update property must be set to true in order to update this structure.
+ SecondaryDisk []SecondaryDiskInitParameters `json:"secondaryDisk,omitempty" tf:"secondary_disk,omitempty"`
+
+ // ID of the service account authorized for this instance.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // Reference to a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+
+ // The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type InstanceObservation struct {
+
+ // If you try to update a property that requires recreating the instance without setting this field, the update will fail.
+ AllowRecreate *bool `json:"allowRecreate,omitempty" tf:"allow_recreate,omitempty"`
+
+ // If you try to update a property that requires stopping the instance without setting this field, the update will fail.
+ AllowStoppingForUpdate *bool `json:"allowStoppingForUpdate,omitempty" tf:"allow_stopping_for_update,omitempty"`
+
+ // The boot disk for the instance. The structure is documented below.
+ BootDisk []BootDiskObservation `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+ // Creation timestamp of the instance.
+ CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+ // Description of the instance.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of filesystems that are attached to the instance. Structure is documented below.
+ Filesystem []InstanceFilesystemObservation `json:"filesystem,omitempty" tf:"filesystem,omitempty"`
+
+ // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // The fully qualified DNS name of this instance.
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+ // ID of the GPU cluster to attach this instance to. The GPU cluster must exist in the same zone as the instance.
+ GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"`
+
+ HardwareGeneration []InstanceHardwareGenerationObservation `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"`
+
+ // Host name for the instance. This field is used to generate the instance fqdn value. The host name must be unique within the network and region. If not specified, the host name will be equal to the id of the instance and the fqdn will be <id>.auto.internal. Otherwise the FQDN will be <hostname>.<region_id>.internal.
+ Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"`
+
+ ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // List of local disks that are attached to the instance. Structure is documented below.
+ LocalDisk []LocalDiskObservation `json:"localDisk,omitempty" tf:"local_disk,omitempty"`
+
+ // Time between notification via metadata service and maintenance. E.g., 60s.
+ MaintenanceGracePeriod *string `json:"maintenanceGracePeriod,omitempty" tf:"maintenance_grace_period,omitempty"`
+
+ // Behaviour on maintenance events. The default is unspecified. Values: unspecified, migrate, restart.
+ MaintenancePolicy *string `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+ // Metadata key/value pairs to make available from within the instance.
+ // +mapType=granular
+ Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+ // Options allowing the user to configure access to the instance's metadata.
+ MetadataOptions []MetadataOptionsObservation `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"`
+
+ // Resource name.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // Type of network acceleration. The default is standard. Values: standard, software_accelerated
+ NetworkAccelerationType *string `json:"networkAccelerationType,omitempty" tf:"network_acceleration_type,omitempty"`
+
+ // Networks to attach to the instance. This can be specified multiple times. The structure is documented below.
+ NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+ // The placement policy configuration. The structure is documented below.
+ PlacementPolicy []PlacementPolicyObservation `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+ // The type of virtual machine to create. The default is 'standard-v1'.
+ PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+ // Compute resources that are allocated for the instance. The structure is documented below.
+ Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+
+ // Scheduling policy configuration. The structure is documented below.
+ SchedulingPolicy []SchedulingPolicyObservation `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+
+ // A set of disks to attach to the instance. The structure is documented below. Note: The allow_stopping_for_update property must be set to true in order to update this structure.
+ SecondaryDisk []SecondaryDiskObservation `json:"secondaryDisk,omitempty" tf:"secondary_disk,omitempty"`
+
+ // ID of the service account authorized for this instance.
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // The status of this instance.
+ Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+ // The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type InstanceParameters struct {
+
+ // If you try to update a property that requires recreating the instance without setting this field, the update will fail.
+ // +kubebuilder:validation:Optional
+ AllowRecreate *bool `json:"allowRecreate,omitempty" tf:"allow_recreate,omitempty"`
+
+ // If you try to update a property that requires stopping the instance without setting this field, the update will fail.
+ // +kubebuilder:validation:Optional
+ AllowStoppingForUpdate *bool `json:"allowStoppingForUpdate,omitempty" tf:"allow_stopping_for_update,omitempty"`
+
+ // The boot disk for the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ BootDisk []BootDiskParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+ // Description of the instance.
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of filesystems that are attached to the instance. Structure is documented below.
+ // +kubebuilder:validation:Optional
+ Filesystem []InstanceFilesystemParameters `json:"filesystem,omitempty" tf:"filesystem,omitempty"`
+
+ // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+ // ID of the GPU cluster to attach this instance to. The GPU cluster must exist in the same zone as the instance.
+ // +kubebuilder:validation:Optional
+ GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"`
+
+ // Host name for the instance. This field is used to generate the instance fqdn value. The host name must be unique within the network and region. If not specified, the host name will be equal to the id of the instance and the fqdn will be <id>.auto.internal. Otherwise the FQDN will be <hostname>.<region_id>.internal.
+ // +kubebuilder:validation:Optional
+ Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // List of local disks that are attached to the instance. Structure is documented below.
+ // +kubebuilder:validation:Optional
+ LocalDisk []LocalDiskParameters `json:"localDisk,omitempty" tf:"local_disk,omitempty"`
+
+ // Time between notification via metadata service and maintenance. E.g., 60s.
+ // +kubebuilder:validation:Optional
+ MaintenanceGracePeriod *string `json:"maintenanceGracePeriod,omitempty" tf:"maintenance_grace_period,omitempty"`
+
+ // Behaviour on maintenance events. The default is unspecified. Values: unspecified, migrate, restart.
+ // +kubebuilder:validation:Optional
+ MaintenancePolicy *string `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+ // Metadata key/value pairs to make available from within the instance.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+ // Options allowing the user to configure access to the instance's metadata.
+ // +kubebuilder:validation:Optional
+ MetadataOptions []MetadataOptionsParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"`
+
+ // Resource name.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // Type of network acceleration. The default is standard. Values: standard, software_accelerated
+ // +kubebuilder:validation:Optional
+ NetworkAccelerationType *string `json:"networkAccelerationType,omitempty" tf:"network_acceleration_type,omitempty"`
+
+ // Networks to attach to the instance. This can be specified multiple times. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ NetworkInterface []NetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+ // The placement policy configuration. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ PlacementPolicy []PlacementPolicyParameters `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+ // The type of virtual machine to create. The default is 'standard-v1'.
+ // +kubebuilder:validation:Optional
+ PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+ // Compute resources that are allocated for the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Resources []ResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+ // Scheduling policy configuration. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ SchedulingPolicy []SchedulingPolicyParameters `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+
+ // A set of disks to attach to the instance. The structure is documented below. Note: The allow_stopping_for_update property must be set to true in order to update this structure.
+ // +kubebuilder:validation:Optional
+ SecondaryDisk []SecondaryDiskParameters `json:"secondaryDisk,omitempty" tf:"secondary_disk,omitempty"`
+
+ // ID of the service account authorized for this instance.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+ // +kubebuilder:validation:Optional
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // Reference to a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+
+ // The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used.
+ // +kubebuilder:validation:Optional
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type LocalDiskInitParameters struct {
+
+ // Size of the disk, specified in bytes.
+ SizeBytes *float64 `json:"sizeBytes,omitempty" tf:"size_bytes,omitempty"`
+}
+
+type LocalDiskObservation struct {
+
+ // The name of the local disk device.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // Size of the disk, specified in bytes.
+ SizeBytes *float64 `json:"sizeBytes,omitempty" tf:"size_bytes,omitempty"`
+}
+
+type LocalDiskParameters struct {
+
+ // Size of the disk, specified in bytes.
+ // +kubebuilder:validation:Optional
+ SizeBytes *float64 `json:"sizeBytes" tf:"size_bytes,omitempty"`
+}
+
+type MetadataOptionsInitParameters struct {
+ AwsV1HTTPEndpoint *float64 `json:"awsV1HttpEndpoint,omitempty" tf:"aws_v1_http_endpoint,omitempty"`
+
+ AwsV1HTTPToken *float64 `json:"awsV1HttpToken,omitempty" tf:"aws_v1_http_token,omitempty"`
+
+ GceHTTPEndpoint *float64 `json:"gceHttpEndpoint,omitempty" tf:"gce_http_endpoint,omitempty"`
+
+ GceHTTPToken *float64 `json:"gceHttpToken,omitempty" tf:"gce_http_token,omitempty"`
+}
+
+type MetadataOptionsObservation struct {
+ AwsV1HTTPEndpoint *float64 `json:"awsV1HttpEndpoint,omitempty" tf:"aws_v1_http_endpoint,omitempty"`
+
+ AwsV1HTTPToken *float64 `json:"awsV1HttpToken,omitempty" tf:"aws_v1_http_token,omitempty"`
+
+ GceHTTPEndpoint *float64 `json:"gceHttpEndpoint,omitempty" tf:"gce_http_endpoint,omitempty"`
+
+ GceHTTPToken *float64 `json:"gceHttpToken,omitempty" tf:"gce_http_token,omitempty"`
+}
+
+type MetadataOptionsParameters struct {
+
+ // +kubebuilder:validation:Optional
+ AwsV1HTTPEndpoint *float64 `json:"awsV1HttpEndpoint,omitempty" tf:"aws_v1_http_endpoint,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ AwsV1HTTPToken *float64 `json:"awsV1HttpToken,omitempty" tf:"aws_v1_http_token,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ GceHTTPEndpoint *float64 `json:"gceHttpEndpoint,omitempty" tf:"gce_http_endpoint,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ GceHTTPToken *float64 `json:"gceHttpToken,omitempty" tf:"gce_http_token,omitempty"`
+}
+
+type NATDNSRecordInitParameters struct {
+
+ // DNS zone ID (if not set, private zone used).
+ DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
+
+ // DNS record FQDN (must have a dot at the end).
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+ // When set to true, also create a PTR DNS record.
+ Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+ // DNS record TTL, in seconds.
+ TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"`
+}
+
+type NATDNSRecordObservation struct {
+
+ // DNS zone ID (if not set, private zone used).
+ DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
+
+ // DNS record FQDN (must have a dot at the end).
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+ // When set to true, also create a PTR DNS record.
+ Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+ // DNS record TTL, in seconds.
+ TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"`
+}
+
+type NATDNSRecordParameters struct {
+
+ // DNS zone ID (if not set, private zone used).
+ // +kubebuilder:validation:Optional
+ DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
+
+ // DNS record FQDN (must have a dot at the end).
+ // +kubebuilder:validation:Optional
+ Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"`
+
+ // When set to true, also create a PTR DNS record.
+ // +kubebuilder:validation:Optional
+ Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+ // DNS record TTL, in seconds.
+ // +kubebuilder:validation:Optional
+ TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"`
+}
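+
+// The sketch below is illustrative only and is not part of the generated
+// code: it shows how the NAT DNS record types above are populated in Go.
+// All values are hypothetical.
+func exampleNATDNSRecord() NATDNSRecordParameters {
+ fqdn := "vm-1.example.com." // the trailing dot is required, per the field docs
+ ttl := 300.0
+ ptr := true
+ return NATDNSRecordParameters{
+ Fqdn: &fqdn, // json:"fqdn" has no omitempty: required whenever a record is set
+ TTL: &ttl,
+ Ptr: &ptr,
+ }
+}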
+
+type NetworkInterfaceInitParameters struct {
+
+ // List of configurations for creating ipv4 DNS records. The structure is documented below.
+ DNSRecord []DNSRecordInitParameters `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"`
+
+ // The private IP address to assign to the instance. If empty, the address will be automatically assigned from the specified subnet.
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // Allocate an IPv4 address for the interface. The default value is true.
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ // If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // The private IPv6 address to assign to the instance.
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // List of configurations for creating ipv6 DNS records. The structure is documented below.
+ IPv6DNSRecord []IPv6DNSRecordInitParameters `json:"ipv6DnsRecord,omitempty" tf:"ipv6_dns_record,omitempty"`
+
+ // Index of the network interface; it will be calculated automatically for instance create or update operations if not specified. Required for attach/detach operations.
+ Index *float64 `json:"index,omitempty" tf:"index,omitempty"`
+
+ // Provide a public address, for instance, to access the internet over NAT.
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // List of configurations for creating ipv4 NAT DNS records. The structure is documented below.
+ NATDNSRecord []NATDNSRecordInitParameters `json:"natDnsRecord,omitempty" tf:"nat_dns_record,omitempty"`
+
+ // Provide a public address, for instance, to access the internet over NAT. The address must already be reserved in the web UI.
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ // Security group ids for network interface.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+ // ID of the subnet to attach this interface to. The subnet must exist in the same zone where this instance will be created.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+}
+
+type NetworkInterfaceObservation struct {
+
+ // List of configurations for creating ipv4 DNS records. The structure is documented below.
+ DNSRecord []DNSRecordObservation `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"`
+
+ // The private IP address to assign to the instance. If empty, the address will be automatically assigned from the specified subnet.
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // Allocate an IPv4 address for the interface. The default value is true.
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ // If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // The private IPv6 address to assign to the instance.
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // List of configurations for creating ipv6 DNS records. The structure is documented below.
+ IPv6DNSRecord []IPv6DNSRecordObservation `json:"ipv6DnsRecord,omitempty" tf:"ipv6_dns_record,omitempty"`
+
+ // Index of the network interface; it will be calculated automatically for instance create or update operations if not specified. Required for attach/detach operations.
+ Index *float64 `json:"index,omitempty" tf:"index,omitempty"`
+
+ MacAddress *string `json:"macAddress,omitempty" tf:"mac_address,omitempty"`
+
+ // Provide a public address, for instance, to access the internet over NAT.
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // List of configurations for creating ipv4 NAT DNS records. The structure is documented below.
+ NATDNSRecord []NATDNSRecordObservation `json:"natDnsRecord,omitempty" tf:"nat_dns_record,omitempty"`
+
+ // Provide a public address, for instance, to access the internet over NAT. The address must already be reserved in the web UI.
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ NATIPVersion *string `json:"natIpVersion,omitempty" tf:"nat_ip_version,omitempty"`
+
+ // Security group ids for network interface.
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // ID of the subnet to attach this interface to. The subnet must exist in the same zone where this instance will be created.
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+}
+
+type NetworkInterfaceParameters struct {
+
+ // List of configurations for creating ipv4 DNS records. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ DNSRecord []DNSRecordParameters `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"`
+
+ // The private IP address to assign to the instance. If empty, the address will be automatically assigned from the specified subnet.
+ // +kubebuilder:validation:Optional
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // Allocate an IPv4 address for the interface. The default value is true.
+ // +kubebuilder:validation:Optional
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ // If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+ // +kubebuilder:validation:Optional
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // The private IPv6 address to assign to the instance.
+ // +kubebuilder:validation:Optional
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // List of configurations for creating ipv6 DNS records. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ IPv6DNSRecord []IPv6DNSRecordParameters `json:"ipv6DnsRecord,omitempty" tf:"ipv6_dns_record,omitempty"`
+
+ // Index of the network interface; it will be calculated automatically for instance create or update operations if not specified. Required for attach/detach operations.
+ // +kubebuilder:validation:Optional
+ Index *float64 `json:"index,omitempty" tf:"index,omitempty"`
+
+ // Provide a public address, for instance, to access the internet over NAT.
+ // +kubebuilder:validation:Optional
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // List of configurations for creating ipv4 NAT DNS records. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ NATDNSRecord []NATDNSRecordParameters `json:"natDnsRecord,omitempty" tf:"nat_dns_record,omitempty"`
+
+ // Provide a public address, for instance, to access the internet over NAT. The address must already be reserved in the web UI.
+ // +kubebuilder:validation:Optional
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ // Security group ids for network interface.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+ // ID of the subnet to attach this interface to. The subnet must exist in the same zone where this instance will be created.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +kubebuilder:validation:Optional
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+}
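+
+// Illustrative sketch, not generated code: a minimal network interface that
+// gets a public (NAT) address and resolves its subnet from another managed
+// resource by name. "my-subnet" is a hypothetical object name.
+func exampleNetworkInterface() NetworkInterfaceParameters {
+ nat := true
+ return NetworkInterfaceParameters{
+ NAT: &nat,
+ SubnetIDRef: &v1.Reference{Name: "my-subnet"},
+ }
+}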
+
+type PlacementPolicyInitParameters struct {
+
+ // List of host affinity rules. The structure is documented below.
+ HostAffinityRules []HostAffinityRulesInitParameters `json:"hostAffinityRules,omitempty" tf:"host_affinity_rules,omitempty"`
+
+ // Specifies the ID of the Placement Group to assign to the instance.
+ PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"`
+
+ PlacementGroupPartition *float64 `json:"placementGroupPartition,omitempty" tf:"placement_group_partition,omitempty"`
+}
+
+type PlacementPolicyObservation struct {
+
+ // List of host affinity rules. The structure is documented below.
+ HostAffinityRules []HostAffinityRulesObservation `json:"hostAffinityRules,omitempty" tf:"host_affinity_rules,omitempty"`
+
+ // Specifies the ID of the Placement Group to assign to the instance.
+ PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"`
+
+ PlacementGroupPartition *float64 `json:"placementGroupPartition,omitempty" tf:"placement_group_partition,omitempty"`
+}
+
+type PlacementPolicyParameters struct {
+
+ // List of host affinity rules. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ HostAffinityRules []HostAffinityRulesParameters `json:"hostAffinityRules,omitempty" tf:"host_affinity_rules,omitempty"`
+
+ // Specifies the ID of the Placement Group to assign to the instance.
+ // +kubebuilder:validation:Optional
+ PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ PlacementGroupPartition *float64 `json:"placementGroupPartition,omitempty" tf:"placement_group_partition,omitempty"`
+}
+
+type ResourcesInitParameters struct {
+
+ // If provided, specifies baseline performance for a core as a percent.
+ CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"`
+
+ // CPU cores for the instance.
+ Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"`
+
+ // If provided, specifies the number of GPU devices for the instance.
+ Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"`
+
+ // Memory size in GB.
+ Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"`
+}
+
+type ResourcesObservation struct {
+
+ // If provided, specifies baseline performance for a core as a percent.
+ CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"`
+
+ // CPU cores for the instance.
+ Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"`
+
+ // If provided, specifies the number of GPU devices for the instance.
+ Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"`
+
+ // Memory size in GB.
+ Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"`
+}
+
+type ResourcesParameters struct {
+
+ // If provided, specifies baseline performance for a core as a percent.
+ // +kubebuilder:validation:Optional
+ CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"`
+
+ // CPU cores for the instance.
+ // +kubebuilder:validation:Optional
+ Cores *float64 `json:"cores" tf:"cores,omitempty"`
+
+ // If provided, specifies the number of GPU devices for the instance.
+ // +kubebuilder:validation:Optional
+ Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"`
+
+ // Memory size in GB.
+ // +kubebuilder:validation:Optional
+ Memory *float64 `json:"memory" tf:"memory,omitempty"`
+}
+
+type SchedulingPolicyInitParameters struct {
+
+ // Specifies if the instance is preemptible. Defaults to false.
+ Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type SchedulingPolicyObservation struct {
+
+ // Specifies if the instance is preemptible. Defaults to false.
+ Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type SchedulingPolicyParameters struct {
+
+ // Specifies if the instance is preemptible. Defaults to false.
+ // +kubebuilder:validation:Optional
+ Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type SecondaryDiskInitParameters struct {
+
+ // Whether the disk is auto-deleted when the instance is deleted. The default value is false.
+ AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"`
+
+ // Name that can be used to access an attached disk under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the disk that is attached to the instance.
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Type of access to the disk resource. By default, a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type SecondaryDiskObservation struct {
+
+ // Whether the disk is auto-deleted when the instance is deleted. The default value is false.
+ AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"`
+
+ // Name that can be used to access an attached disk under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the disk that is attached to the instance.
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Type of access to the disk resource. By default, a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type SecondaryDiskParameters struct {
+
+ // Whether the disk is auto-deleted when the instance is deleted. The default value is false.
+ // +kubebuilder:validation:Optional
+ AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"`
+
+ // Name that can be used to access an attached disk under /dev/disk/by-id/.
+ // +kubebuilder:validation:Optional
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the disk that is attached to the instance.
+ // +kubebuilder:validation:Optional
+ DiskID *string `json:"diskId" tf:"disk_id,omitempty"`
+
+ // Type of access to the disk resource. By default, a disk is attached in READ_WRITE mode.
+ // +kubebuilder:validation:Optional
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+// InstanceSpec defines the desired state of Instance
+type InstanceSpec struct {
+ v1.ResourceSpec `json:",inline"`
+ ForProvider InstanceParameters `json:"forProvider"`
+ // THIS IS A BETA FIELD. It will be honored
+ // unless the Management Policies feature flag is disabled.
+ // InitProvider holds the same fields as ForProvider, with the exception
+ // of Identifier and other resource reference fields. The fields that are
+ // in InitProvider are merged into ForProvider when the resource is created.
+ // The same fields are also added to the terraform ignore_changes hook, to
+ // avoid updating them after creation. This is useful for fields that are
+ // required on creation, but we do not desire to update them after creation,
+ // for example because an external controller is managing them, like an
+ // autoscaler.
+ InitProvider InstanceInitParameters `json:"initProvider,omitempty"`
+}
+
+// InstanceStatus defines the observed state of Instance.
+type InstanceStatus struct {
+ v1.ResourceStatus `json:",inline"`
+ AtProvider InstanceObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Instance is the Schema for the Instances API. Manages a VM instance resource.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.bootDisk) || (has(self.initProvider) && has(self.initProvider.bootDisk))",message="spec.forProvider.bootDisk is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.networkInterface) || (has(self.initProvider) && has(self.initProvider.networkInterface))",message="spec.forProvider.networkInterface is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resources) || (has(self.initProvider) && has(self.initProvider.resources))",message="spec.forProvider.resources is a required parameter" + Spec InstanceSpec `json:"spec"` + Status InstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceList contains a list of Instances +type InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Instance `json:"items"` +} + +// Repository type metadata. +var ( + Instance_Kind = "Instance" + Instance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Instance_Kind}.String() + Instance_KindAPIVersion = Instance_Kind + "." + CRDGroupVersion.String() + Instance_GroupVersionKind = CRDGroupVersion.WithKind(Instance_Kind) +) + +func init() { + SchemeBuilder.Register(&Instance{}, &InstanceList{}) +} diff --git a/apis/compute/v1alpha1/zz_instancegroup_terraformed.go b/apis/compute/v1alpha1/zz_instancegroup_terraformed.go new file mode 100755 index 0000000..c146912 --- /dev/null +++ b/apis/compute/v1alpha1/zz_instancegroup_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+
+package v1alpha1
+
+import (
+ "dario.cat/mergo"
+ "github.com/pkg/errors"
+
+ "github.com/crossplane/upjet/pkg/resource"
+ "github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this InstanceGroup
+func (mg *InstanceGroup) GetTerraformResourceType() string {
+ return "yandex_compute_instance_group"
+}
+
+// GetConnectionDetailsMapping for this InstanceGroup
+func (tr *InstanceGroup) GetConnectionDetailsMapping() map[string]string {
+ return nil
+}
+
+// GetObservation of this InstanceGroup
+func (tr *InstanceGroup) GetObservation() (map[string]any, error) {
+ o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this InstanceGroup
+func (tr *InstanceGroup) SetObservation(obs map[string]any) error {
+ p, err := json.TFParser.Marshal(obs)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this InstanceGroup
+func (tr *InstanceGroup) GetID() string {
+ if tr.Status.AtProvider.ID == nil {
+ return ""
+ }
+ return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this InstanceGroup
+func (tr *InstanceGroup) GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this InstanceGroup
+func (tr *InstanceGroup) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this InstanceGroup
+func (tr *InstanceGroup) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this InstanceGroup
+func (tr *InstanceGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
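+
+// Illustrative sketch, not generated code: GetMergedParameters above keeps
+// existing forProvider values and only fills gaps from initProvider, because
+// Overwrite is forced back to false. With the hypothetical inputs below the
+// merged result is {"name": "a", "zone": "ru-central1-a"}.
+func exampleMergeSemantics() (map[string]any, error) {
+ params := map[string]any{"name": "a"}
+ initParams := map[string]any{"name": "b", "zone": "ru-central1-a"}
+ err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ return params, err
+}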
+
+// LateInitialize this InstanceGroup using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *InstanceGroup) LateInitialize(attrs []byte) (bool, error) {
+ params := &InstanceGroupParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *InstanceGroup) GetTerraformSchemaVersion() int {
+ return 0
+}
diff --git a/apis/compute/v1alpha1/zz_instancegroup_types.go b/apis/compute/v1alpha1/zz_instancegroup_types.go
new file mode 100755
index 0000000..1f75bc7
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_instancegroup_types.go
@@ -0,0 +1,2155 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type AllocationPolicyInitParameters struct {
+
+ // Array of availability zone IDs with list of instance tags.
+ InstanceTagsPool []InstanceTagsPoolInitParameters `json:"instanceTagsPool,omitempty" tf:"instance_tags_pool,omitempty"`
+
+ // A list of availability zones.
+ // +listType=set
+ Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"`
+}
+
+type AllocationPolicyObservation struct {
+
+ // Array of availability zone IDs with list of instance tags.
+ InstanceTagsPool []InstanceTagsPoolObservation `json:"instanceTagsPool,omitempty" tf:"instance_tags_pool,omitempty"`
+
+ // A list of availability zones.
+ // +listType=set
+ Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"`
+}
+
+type AllocationPolicyParameters struct {
+
+ // Array of availability zone IDs with list of instance tags.
+ // +kubebuilder:validation:Optional
+ InstanceTagsPool []InstanceTagsPoolParameters `json:"instanceTagsPool,omitempty" tf:"instance_tags_pool,omitempty"`
+
+ // A list of availability zones.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ Zones []*string `json:"zones" tf:"zones,omitempty"`
+}
+
+type ApplicationLoadBalancerInitParameters struct {
+
+ // Do not wait for load balancer health checks.
+ IgnoreHealthChecks *bool `json:"ignoreHealthChecks,omitempty" tf:"ignore_health_checks,omitempty"`
+
+ // Timeout for waiting for the VM to be checked by the load balancer. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ MaxOpeningTrafficDuration *float64 `json:"maxOpeningTrafficDuration,omitempty" tf:"max_opening_traffic_duration,omitempty"`
+
+ // A description of the target group.
+ TargetGroupDescription *string `json:"targetGroupDescription,omitempty" tf:"target_group_description,omitempty"`
+
+ // A set of key/value label pairs.
+ // +mapType=granular
+ TargetGroupLabels map[string]*string `json:"targetGroupLabels,omitempty" tf:"target_group_labels,omitempty"`
+
+ // The name of the target group.
+ TargetGroupName *string `json:"targetGroupName,omitempty" tf:"target_group_name,omitempty"`
+}
+
+type ApplicationLoadBalancerObservation struct {
+
+ // Do not wait for load balancer health checks.
+ IgnoreHealthChecks *bool `json:"ignoreHealthChecks,omitempty" tf:"ignore_health_checks,omitempty"`
+
+ // Timeout for waiting for the VM to be checked by the load balancer. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ MaxOpeningTrafficDuration *float64 `json:"maxOpeningTrafficDuration,omitempty" tf:"max_opening_traffic_duration,omitempty"`
+
+ // The status message of the target group.
+ StatusMessage *string `json:"statusMessage,omitempty" tf:"status_message,omitempty"`
+
+ // A description of the target group.
+ TargetGroupDescription *string `json:"targetGroupDescription,omitempty" tf:"target_group_description,omitempty"`
+
+ // The ID of the target group.
+ TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"`
+
+ // A set of key/value label pairs.
+ // +mapType=granular
+ TargetGroupLabels map[string]*string `json:"targetGroupLabels,omitempty" tf:"target_group_labels,omitempty"`
+
+ // The name of the target group.
+ TargetGroupName *string `json:"targetGroupName,omitempty" tf:"target_group_name,omitempty"`
+}
+
+type ApplicationLoadBalancerParameters struct {
+
+ // Do not wait for load balancer health checks.
+ // +kubebuilder:validation:Optional
+ IgnoreHealthChecks *bool `json:"ignoreHealthChecks,omitempty" tf:"ignore_health_checks,omitempty"`
+
+ // Timeout for waiting for the VM to be checked by the load balancer. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ // +kubebuilder:validation:Optional
+ MaxOpeningTrafficDuration *float64 `json:"maxOpeningTrafficDuration,omitempty" tf:"max_opening_traffic_duration,omitempty"`
+
+ // A description of the target group.
+ // +kubebuilder:validation:Optional
+ TargetGroupDescription *string `json:"targetGroupDescription,omitempty" tf:"target_group_description,omitempty"`
+
+ // A set of key/value label pairs.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ TargetGroupLabels map[string]*string `json:"targetGroupLabels,omitempty" tf:"target_group_labels,omitempty"`
+
+ // The name of the target group.
+ // +kubebuilder:validation:Optional
+ TargetGroupName *string `json:"targetGroupName,omitempty" tf:"target_group_name,omitempty"`
+}
+
+type AutoScaleInitParameters struct {
+
+ // Autoscale type, can be ZONAL or REGIONAL. By default ZONAL type is used.
+ AutoScaleType *string `json:"autoScaleType,omitempty" tf:"auto_scale_type,omitempty"`
+
+ // Target CPU load level.
+ CPUUtilizationTarget *float64 `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
+
+ // A list of custom rules. The structure is documented below.
+ CustomRule []CustomRuleInitParameters `json:"customRule,omitempty" tf:"custom_rule,omitempty"`
+
+ // The initial number of instances in the instance group.
+ InitialSize *float64 `json:"initialSize,omitempty" tf:"initial_size,omitempty"`
+
+ // The maximum number of virtual machines in the group.
+ MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+ // The amount of time, in seconds, that metrics are averaged for. If the average value at the end of the interval is higher than the cpu_utilization_target, the instance group will increase the number of virtual machines in the group.
+ MeasurementDuration *float64 `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+
+ // The minimum number of virtual machines in a single availability zone.
+ MinZoneSize *float64 `json:"minZoneSize,omitempty" tf:"min_zone_size,omitempty"`
+
+ // The minimum time interval, in seconds, to monitor the load before an instance group can reduce the number of virtual machines in the group. During this time, the group will not decrease even if the average load falls below the value of cpu_utilization_target.
+ StabilizationDuration *float64 `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+
+ // The warm-up time of the virtual machine, in seconds. During this time, traffic is fed to the virtual machine, but load metrics are not taken into account.
+ WarmupDuration *float64 `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+}
+
+type AutoScaleObservation struct {
+
+ // Autoscale type, can be ZONAL or REGIONAL. By default ZONAL type is used.
+ AutoScaleType *string `json:"autoScaleType,omitempty" tf:"auto_scale_type,omitempty"`
+
+ // Target CPU load level.
+ CPUUtilizationTarget *float64 `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
+
+ // A list of custom rules. The structure is documented below.
+ CustomRule []CustomRuleObservation `json:"customRule,omitempty" tf:"custom_rule,omitempty"`
+
+ // The initial number of instances in the instance group.
+ InitialSize *float64 `json:"initialSize,omitempty" tf:"initial_size,omitempty"`
+
+ // The maximum number of virtual machines in the group.
+ MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+ // The amount of time, in seconds, that metrics are averaged for. If the average value at the end of the interval is higher than the cpu_utilization_target, the instance group will increase the number of virtual machines in the group.
+ MeasurementDuration *float64 `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+
+ // The minimum number of virtual machines in a single availability zone.
+ MinZoneSize *float64 `json:"minZoneSize,omitempty" tf:"min_zone_size,omitempty"`
+
+ // The minimum time interval, in seconds, to monitor the load before an instance group can reduce the number of virtual machines in the group. During this time, the group will not decrease even if the average load falls below the value of cpu_utilization_target.
+ StabilizationDuration *float64 `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+
+ // The warm-up time of the virtual machine, in seconds. During this time, traffic is fed to the virtual machine, but load metrics are not taken into account.
+ WarmupDuration *float64 `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+}
+
+type AutoScaleParameters struct {
+
+ // Autoscale type, can be ZONAL or REGIONAL. By default ZONAL type is used.
+ // +kubebuilder:validation:Optional
+ AutoScaleType *string `json:"autoScaleType,omitempty" tf:"auto_scale_type,omitempty"`
+
+ // Target CPU load level.
+ // +kubebuilder:validation:Optional
+ CPUUtilizationTarget *float64 `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
+
+ // A list of custom rules. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ CustomRule []CustomRuleParameters `json:"customRule,omitempty" tf:"custom_rule,omitempty"`
+
+ // The initial number of instances in the instance group.
+ // +kubebuilder:validation:Optional
+ InitialSize *float64 `json:"initialSize" tf:"initial_size,omitempty"`
+
+ // The maximum number of virtual machines in the group.
+ // +kubebuilder:validation:Optional
+ MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+ // The amount of time, in seconds, that metrics are averaged for. If the average value at the end of the interval is higher than the cpu_utilization_target, the instance group will increase the number of virtual machines in the group.
+ // +kubebuilder:validation:Optional
+ MeasurementDuration *float64 `json:"measurementDuration" tf:"measurement_duration,omitempty"`
+
+ // The minimum number of virtual machines in a single availability zone.
+ // +kubebuilder:validation:Optional
+ MinZoneSize *float64 `json:"minZoneSize,omitempty" tf:"min_zone_size,omitempty"`
+
+ // The minimum time interval, in seconds, to monitor the load before an instance group can reduce the number of virtual machines in the group. During this time, the group will not decrease even if the average load falls below the value of cpu_utilization_target.
+ // +kubebuilder:validation:Optional
+ StabilizationDuration *float64 `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+
+ // The warm-up time of the virtual machine, in seconds. During this time, traffic is fed to the virtual machine, but load metrics are not taken into account.
+ // +kubebuilder:validation:Optional
+ WarmupDuration *float64 `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+}
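+
+// Illustrative sketch, not generated code: a CPU-driven autoscale policy.
+// initial_size and measurement_duration are the fields whose json tags above
+// lack omitempty, i.e. they must be set; all values here are hypothetical.
+func exampleAutoScale() AutoScaleParameters {
+ initialSize := 2.0
+ measurementDuration := 60.0
+ cpuTarget := 75.0
+ minZoneSize := 1.0
+ return AutoScaleParameters{
+ InitialSize: &initialSize,
+ MeasurementDuration: &measurementDuration,
+ CPUUtilizationTarget: &cpuTarget,
+ MinZoneSize: &minZoneSize,
+ }
+}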
+
+type BootDiskInitializeParamsInitParameters struct {
+
+ // A description of the boot disk.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The disk image to initialize this disk from.
+ // +crossplane:generate:reference:type=Image
+ ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+ // Reference to an Image to populate imageId.
+ // +kubebuilder:validation:Optional
+ ImageIDRef *v1.Reference `json:"imageIdRef,omitempty" tf:"-"`
+
+ // Selector for an Image to populate imageId.
+ // +kubebuilder:validation:Optional
+ ImageIDSelector *v1.Selector `json:"imageIdSelector,omitempty" tf:"-"`
+
+ // The size of the disk in GB.
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+ // The snapshot to initialize this disk from.
+ SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+
+ // The disk type.
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type BootDiskInitializeParamsObservation struct {
+
+ // A description of the boot disk.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The disk image to initialize this disk from.
+ ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+ // The size of the disk in GB.
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+ // The snapshot to initialize this disk from.
+ SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+
+ // The disk type.
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type BootDiskInitializeParamsParameters struct {
+
+ // A description of the boot disk.
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The disk image to initialize this disk from.
+ // +crossplane:generate:reference:type=Image
+ // +kubebuilder:validation:Optional
+ ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+ // Reference to an Image to populate imageId.
+ // +kubebuilder:validation:Optional
+ ImageIDRef *v1.Reference `json:"imageIdRef,omitempty" tf:"-"`
+
+ // Selector for an Image to populate imageId.
+ // +kubebuilder:validation:Optional
+ ImageIDSelector *v1.Selector `json:"imageIdSelector,omitempty" tf:"-"`
+
+ // The size of the disk in GB.
+ // +kubebuilder:validation:Optional
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+ // The snapshot to initialize this disk from.
+ // +kubebuilder:validation:Optional
+ SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+
+ // The disk type.
+ // +kubebuilder:validation:Optional
+ Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type CustomRuleInitParameters struct {
+
+ // The ID of the folder that the resources belong to.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance group.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // The name of the metric.
+ MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"`
+
+ // Metric type, GAUGE or COUNTER.
+ MetricType *string `json:"metricType,omitempty" tf:"metric_type,omitempty"`
+
+ // Rule type: UTILIZATION - This type means that the metric applies to one instance. First, Instance Groups calculates the average metric value for each instance, then averages the values for instances in one availability zone. This type of metric must have the instance_id label. WORKLOAD - This type means that the metric applies to instances in one availability zone. This type of metric must have the zone_id label.
+ RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"`
+
+ // Service of custom metric in Yandex Monitoring that should be used for scaling.
+ Service *string `json:"service,omitempty" tf:"service,omitempty"`
+
+ // Target metric value level.
+ Target *float64 `json:"target,omitempty" tf:"target,omitempty"`
+}
+
+type CustomRuleObservation struct {
+
+ // The ID of the folder that the resources belong to.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance group.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // The name of the metric.
+ MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"`
+
+ // Metric type, GAUGE or COUNTER.
+ MetricType *string `json:"metricType,omitempty" tf:"metric_type,omitempty"`
+
+ // Rule type: UTILIZATION - This type means that the metric applies to one instance. First, Instance Groups calculates the average metric value for each instance, then averages the values for instances in one availability zone. This type of metric must have the instance_id label. WORKLOAD - This type means that the metric applies to instances in one availability zone. This type of metric must have the zone_id label.
+ RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"`
+
+ // Service of custom metric in Yandex Monitoring that should be used for scaling.
+ Service *string `json:"service,omitempty" tf:"service,omitempty"`
+
+ // Target metric value level.
+ Target *float64 `json:"target,omitempty" tf:"target,omitempty"`
+}
+
+type CustomRuleParameters struct {
+
+ // The ID of the folder that the resources belong to.
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance group.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // The name of the metric.
+ // +kubebuilder:validation:Optional
+ MetricName *string `json:"metricName" tf:"metric_name,omitempty"`
+
+ // Metric type, GAUGE or COUNTER.
+ // +kubebuilder:validation:Optional
+ MetricType *string `json:"metricType" tf:"metric_type,omitempty"`
+
+ // Rule type: UTILIZATION - This type means that the metric applies to one instance. First, Instance Groups calculates the average metric value for each instance, then averages the values for instances in one availability zone. This type of metric must have the instance_id label. WORKLOAD - This type means that the metric applies to instances in one availability zone. This type of metric must have the zone_id label.
+ // +kubebuilder:validation:Optional
+ RuleType *string `json:"ruleType" tf:"rule_type,omitempty"`
+
+ // Service of custom metric in Yandex Monitoring that should be used for scaling.
+ // +kubebuilder:validation:Optional
+ Service *string `json:"service,omitempty" tf:"service,omitempty"`
+
+ // Target metric value level.
+ // +kubebuilder:validation:Optional
+ Target *float64 `json:"target" tf:"target,omitempty"`
+}
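+
+// Illustrative sketch, not generated code: a custom scaling rule driven by a
+// hypothetical Yandex Monitoring metric. The metric name and values are made
+// up; metric_name, metric_type, rule_type and target are the required fields.
+func exampleCustomRule() CustomRuleParameters {
+ metricName := "queue_depth"
+ metricType := "GAUGE"
+ ruleType := "UTILIZATION"
+ target := 10.0
+ return CustomRuleParameters{
+ MetricName: &metricName,
+ MetricType: &metricType,
+ RuleType: &ruleType,
+ Target: &target,
+ }
+}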
+ MaxExpansion *float64 `json:"maxExpansion,omitempty" tf:"max_expansion,omitempty"`
+
+ // The maximum number of running instances that can be taken offline (stopped or deleted) at the same time during the update process.
+ MaxUnavailable *float64 `json:"maxUnavailable,omitempty" tf:"max_unavailable,omitempty"`
+
+ // The amount of time in seconds to allow for an instance to start. The instance will be considered up and running (and will start receiving traffic) only after the startup_duration has elapsed and all health checks have passed.
+ StartupDuration *float64 `json:"startupDuration,omitempty" tf:"startup_duration,omitempty"`
+
+ // Affects the lifecycle of the instance during deployment. If set to proactive (default), Instance Groups can forcefully stop a running instance. If opportunistic, Instance Groups does not stop a running instance. Instead, it will wait until the instance stops itself or becomes unhealthy.
+ Strategy *string `json:"strategy,omitempty" tf:"strategy,omitempty"`
+}
+
+type DeployPolicyParameters struct {
+
+ // The maximum number of instances that can be created at the same time.
+ // +kubebuilder:validation:Optional
+ MaxCreating *float64 `json:"maxCreating,omitempty" tf:"max_creating,omitempty"`
+
+ // The maximum number of instances that can be deleted at the same time.
+ // +kubebuilder:validation:Optional
+ MaxDeleting *float64 `json:"maxDeleting,omitempty" tf:"max_deleting,omitempty"`
+
+ // The maximum number of instances that can be temporarily allocated above the group's target size during the update process.
+ // +kubebuilder:validation:Optional
+ MaxExpansion *float64 `json:"maxExpansion" tf:"max_expansion,omitempty"`
+
+ // The maximum number of running instances that can be taken offline (stopped or deleted) at the same time during the update process.
+ // +kubebuilder:validation:Optional
+ MaxUnavailable *float64 `json:"maxUnavailable" tf:"max_unavailable,omitempty"`
+
+ // The amount of time in seconds to allow for an instance to start. The instance will be considered up and running (and will start receiving traffic) only after the startup_duration has elapsed and all health checks have passed.
+ // +kubebuilder:validation:Optional
+ StartupDuration *float64 `json:"startupDuration,omitempty" tf:"startup_duration,omitempty"`
+
+ // Affects the lifecycle of the instance during deployment. If set to proactive (default), Instance Groups can forcefully stop a running instance. If opportunistic, Instance Groups does not stop a running instance. Instead, it will wait until the instance stops itself or becomes unhealthy.
+ // +kubebuilder:validation:Optional
+ Strategy *string `json:"strategy,omitempty" tf:"strategy,omitempty"`
+}
+
+type FixedScaleInitParameters struct {
+
+ // The number of instances in the instance group.
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+}
+
+type FixedScaleObservation struct {
+
+ // The number of instances in the instance group.
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+}
+
+type FixedScaleParameters struct {
+
+ // The number of instances in the instance group.
+ // +kubebuilder:validation:Optional
+ Size *float64 `json:"size" tf:"size,omitempty"`
+}
+
+type HTTPOptionsInitParameters struct {
+
+ // The URL path used for health check requests.
+ Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+ // The port used for HTTP health checks.
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+}
+
+type HTTPOptionsObservation struct {
+
+ // The URL path used for health check requests.
+ Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+ // The port used for HTTP health checks.
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+}
+
+type HTTPOptionsParameters struct {
+
+ // The URL path used for health check requests.
+ // +kubebuilder:validation:Optional
+ Path *string `json:"path" tf:"path,omitempty"`
+
+ // The port used for HTTP health checks.
+ // +kubebuilder:validation:Optional
+ Port *float64 `json:"port" tf:"port,omitempty"`
+}
+
+type HealthCheckInitParameters struct {
+
+ // HTTP check options. The structure is documented below.
+ HTTPOptions []HTTPOptionsInitParameters `json:"httpOptions,omitempty" tf:"http_options,omitempty"`
+
+ // The number of successful health checks before the managed instance is declared healthy.
+ HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"`
+
+ // The interval between health checks, in seconds.
+ Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"`
+
+ // TCP check options. The structure is documented below.
+ TCPOptions []TCPOptionsInitParameters `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"`
+
+ // The length of time, in seconds, to wait for a response before the health check times out.
+ Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+ // The number of failed health checks before the managed instance is declared unhealthy.
+ UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"`
+}
+
+type HealthCheckObservation struct {
+
+ // HTTP check options. The structure is documented below.
+ HTTPOptions []HTTPOptionsObservation `json:"httpOptions,omitempty" tf:"http_options,omitempty"`
+
+ // The number of successful health checks before the managed instance is declared healthy.
+ HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"`
+
+ // The interval between health checks, in seconds.
+ Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"`
+
+ // TCP check options. The structure is documented below.
+ TCPOptions []TCPOptionsObservation `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"`
+
+ // The length of time, in seconds, to wait for a response before the health check times out.
+ Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+ // The number of failed health checks before the managed instance is declared unhealthy.
+ UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"`
+}
+
+type HealthCheckParameters struct {
+
+ // HTTP check options. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ HTTPOptions []HTTPOptionsParameters `json:"httpOptions,omitempty" tf:"http_options,omitempty"`
+
+ // The number of successful health checks before the managed instance is declared healthy.
+ // +kubebuilder:validation:Optional
+ HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"`
+
+ // The interval between health checks, in seconds.
+ // +kubebuilder:validation:Optional
+ Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"`
+
+ // TCP check options. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ TCPOptions []TCPOptionsParameters `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"`
+
+ // The length of time, in seconds, to wait for a response before the health check times out.
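+ // For example (hypothetical values): with interval = 5, timeout = 2 and
+ // unhealthy_threshold = 3, an instance is declared unhealthy after three
+ // consecutive failed checks, i.e. after roughly 15 seconds.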
+ // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The number of failed health checks before the managed instance is declared unhealthy. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type InstanceGroupInitParameters struct { + + // The allocation policy of the instance group by zone and region. The structure is documented below. + AllocationPolicy []AllocationPolicyInitParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` + + // Application Load balancing (L7) specifications. The structure is documented below. + ApplicationLoadBalancer []ApplicationLoadBalancerInitParameters `json:"applicationLoadBalancer,omitempty" tf:"application_load_balancer,omitempty"` + + // Flag that protects the instance group from accidental deletion. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The deployment policy of the instance group. The structure is documented below. + DeployPolicy []DeployPolicyInitParameters `json:"deployPolicy,omitempty" tf:"deploy_policy,omitempty"` + + // A description of the instance group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resources belong to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Health check specifications. The structure is documented below. + HealthCheck []HealthCheckInitParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // The template for creating new instances. The structure is documented below. + InstanceTemplate []InstanceTemplateInitParameters `json:"instanceTemplate,omitempty" tf:"instance_template,omitempty"` + + // A set of key/value label pairs to assign to the instance group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Load balancing specifications. The structure is documented below. + LoadBalancer []LoadBalancerInitParameters `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` + + // Timeout for waiting for the VM to become healthy. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds. + MaxCheckingHealthDuration *float64 `json:"maxCheckingHealthDuration,omitempty" tf:"max_checking_health_duration,omitempty"` + + // The name of the instance group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The scaling policy of the instance group. The structure is documented below. + ScalePolicy []ScalePolicyInitParameters `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"` + + // The ID of the service account authorized for this instance group. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // Reference to a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+
+ // A set of key/value variable pairs to assign to the instance group.
+ // +mapType=granular
+ Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"`
+}
+
+type InstanceGroupObservation struct {
+
+ // The allocation policy of the instance group by zone and region. The structure is documented below.
+ AllocationPolicy []AllocationPolicyObservation `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"`
+
+ // Application Load balancing (L7) specifications. The structure is documented below.
+ ApplicationLoadBalancer []ApplicationLoadBalancerObservation `json:"applicationLoadBalancer,omitempty" tf:"application_load_balancer,omitempty"`
+
+ // The instance group creation timestamp.
+ CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+ // Flag that protects the instance group from accidental deletion.
+ DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+ // The deployment policy of the instance group. The structure is documented below.
+ DeployPolicy []DeployPolicyObservation `json:"deployPolicy,omitempty" tf:"deploy_policy,omitempty"`
+
+ // A description of the instance group.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The ID of the folder that the resources belong to.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Health check specifications. The structure is documented below.
+ HealthCheck []HealthCheckObservation `json:"healthCheck,omitempty" tf:"health_check,omitempty"`
+
+ // The ID of the instance group.
+ ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+ // The template for creating new instances. The structure is documented below.
+ InstanceTemplate []InstanceTemplateObservation `json:"instanceTemplate,omitempty" tf:"instance_template,omitempty"`
+
+ Instances []InstancesObservation `json:"instances,omitempty" tf:"instances,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance group.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // Load balancing specifications. The structure is documented below.
+ LoadBalancer []LoadBalancerObservation `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"`
+
+ // Timeout for waiting for the VM to become healthy. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ MaxCheckingHealthDuration *float64 `json:"maxCheckingHealthDuration,omitempty" tf:"max_checking_health_duration,omitempty"`
+
+ // The name of the instance group.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // The scaling policy of the instance group. The structure is documented below.
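+ // A minimal sketch (editorial, hypothetical size) of a fixed scaling policy
+ // as it might appear in a manifest:
+ //   scalePolicy:
+ //   - fixedScale:
+ //     - size: 3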
+ ScalePolicy []ScalePolicyObservation `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"`
+
+ // The ID of the service account authorized for this instance group.
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // The status of the instance group.
+ Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+ // A set of key/value variable pairs to assign to the instance group.
+ // +mapType=granular
+ Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"`
+}
+
+type InstanceGroupParameters struct {
+
+ // The allocation policy of the instance group by zone and region. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ AllocationPolicy []AllocationPolicyParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"`
+
+ // Application Load balancing (L7) specifications. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ ApplicationLoadBalancer []ApplicationLoadBalancerParameters `json:"applicationLoadBalancer,omitempty" tf:"application_load_balancer,omitempty"`
+
+ // Flag that protects the instance group from accidental deletion.
+ // +kubebuilder:validation:Optional
+ DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+ // The deployment policy of the instance group. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ DeployPolicy []DeployPolicyParameters `json:"deployPolicy,omitempty" tf:"deploy_policy,omitempty"`
+
+ // A description of the instance group.
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The ID of the folder that the resources belong to.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+ // Health check specifications. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ HealthCheck []HealthCheckParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"`
+
+ // The template for creating new instances. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ InstanceTemplate []InstanceTemplateParameters `json:"instanceTemplate,omitempty" tf:"instance_template,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance group.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // Load balancing specifications. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ LoadBalancer []LoadBalancerParameters `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"`
+
+ // Timeout for waiting for the VM to become healthy. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ // +kubebuilder:validation:Optional
+ MaxCheckingHealthDuration *float64 `json:"maxCheckingHealthDuration,omitempty" tf:"max_checking_health_duration,omitempty"`
+
+ // The name of the instance group.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // The scaling policy of the instance group. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ ScalePolicy []ScalePolicyParameters `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"`
+
+ // The ID of the service account authorized for this instance group.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+ // +kubebuilder:validation:Optional
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // Reference to a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a ServiceAccount in iam to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+
+ // A set of key/value variable pairs to assign to the instance group.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"`
+}
+
+type InstanceTagsPoolInitParameters struct {
+
+ // List of tags for instances in zone.
+ Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+ // Availability zone.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type InstanceTagsPoolObservation struct {
+
+ // List of tags for instances in zone.
+ Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+ // Availability zone.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type InstanceTagsPoolParameters struct {
+
+ // List of tags for instances in zone.
+ // +kubebuilder:validation:Optional
+ Tags []*string `json:"tags" tf:"tags,omitempty"`
+
+ // Availability zone.
+ // +kubebuilder:validation:Optional
+ Zone *string `json:"zone" tf:"zone,omitempty"`
+}
+
+type InstanceTemplateBootDiskInitParameters struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the existing disk. Use variables to set it.
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Parameters used for creating a disk alongside the instance. The structure is documented below.
+ InitializeParams []BootDiskInitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+
+ // When set, it can later be used to change the DiskSpec of the actual disk.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type InstanceTemplateBootDiskObservation struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the existing disk. Use variables to set it.
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Parameters used for creating a disk alongside the instance. The structure is documented below.
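+ // A minimal sketch (editorial; the image ID is a placeholder) of a boot disk
+ // created alongside the instance, within an instanceTemplate block:
+ //   bootDisk:
+ //   - initializeParams:
+ //     - imageId: <image-id>
+ //       size: 20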
+ InitializeParams []BootDiskInitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+
+ // When set, it can later be used to change the DiskSpec of the actual disk.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type InstanceTemplateBootDiskParameters struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ // +kubebuilder:validation:Optional
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the existing disk. Use variables to set it.
+ // +kubebuilder:validation:Optional
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Parameters used for creating a disk alongside the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ InitializeParams []BootDiskInitializeParamsParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ // +kubebuilder:validation:Optional
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+
+ // When set, it can later be used to change the DiskSpec of the actual disk.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type InstanceTemplateFilesystemInitParameters struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the filesystem that should be attached.
+ FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type InstanceTemplateFilesystemObservation struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the filesystem that should be attached.
+ FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type InstanceTemplateFilesystemParameters struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ // +kubebuilder:validation:Optional
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the filesystem that should be attached.
+ // +kubebuilder:validation:Optional
+ FilesystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ // +kubebuilder:validation:Optional
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+}
+
+type InstanceTemplateInitParameters struct {
+
+ // Boot disk specifications for the instance. The structure is documented below.
+ BootDisk []InstanceTemplateBootDiskInitParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+ // A description of the instance.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of filesystems to attach to the instance. The structure is documented below.
+ Filesystem []InstanceTemplateFilesystemInitParameters `json:"filesystem,omitempty" tf:"filesystem,omitempty"`
+
+ // Hostname template for the instance.
+ // This field is used to generate the FQDN value of the instance.
+ // The hostname must be unique within the network and region.
+ // If not specified, the hostname will be equal to the id of the instance
+ // and the FQDN will be <id>.auto.internal. Otherwise the FQDN will be <hostname>.<region_id>.internal.
+ // In order to be unique it must contain at least one of the instance unique placeholders:
+ // {instance.short_id}
+ // {instance.index}
+ // combination of {instance.zone_id} and {instance.index_in_zone}
+ // Example: my-instance-{instance.index}
+ // If not set, the name value will be used.
+ // It may also contain other placeholders; see the metadata doc for the full list.
+ Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // A set of metadata key/value pairs to make available from within the instance.
+ // +mapType=granular
+ Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+ // Options that allow the user to configure access to managed instance metadata.
+ MetadataOptions []InstanceTemplateMetadataOptionsInitParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"`
+
+ // Name template of the instance.
+ // In order to be unique it must contain at least one of the instance unique placeholders:
+ // {instance.short_id}
+ // {instance.index}
+ // combination of {instance.zone_id} and {instance.index_in_zone}
+ // Example: my-instance-{instance.index}
+ // If not set, the default is used: {instance_group.id}-{instance.short_id}
+ // It may also contain other placeholders; see the metadata doc for the full list.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below.
+ NetworkInterface []InstanceTemplateNetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+ // Network acceleration type for the instance. The structure is documented below.
+ NetworkSettings []NetworkSettingsInitParameters `json:"networkSettings,omitempty" tf:"network_settings,omitempty"`
+
+ // The placement policy configuration. The structure is documented below.
+ PlacementPolicy []InstanceTemplatePlacementPolicyInitParameters `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+ // The ID of the hardware platform configuration for the instance. The default is 'standard-v1'.
+ PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+ // Compute resource specifications for the instance. The structure is documented below.
+ Resources []InstanceTemplateResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+ // The scheduling policy configuration. The structure is documented below.
+ SchedulingPolicy []InstanceTemplateSchedulingPolicyInitParameters `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+
+ // A list of disks to attach to the instance. The structure is documented below.
+ SecondaryDisk []InstanceTemplateSecondaryDiskInitParameters `json:"secondaryDisk,omitempty" tf:"secondary_disk,omitempty"`
+
+ // The ID of the service account authorized for this instance.
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+}
+
+type InstanceTemplateMetadataOptionsInitParameters struct {
+ AwsV1HTTPEndpoint *float64 `json:"awsV1HttpEndpoint,omitempty" tf:"aws_v1_http_endpoint,omitempty"`
+
+ AwsV1HTTPToken *float64 `json:"awsV1HttpToken,omitempty" tf:"aws_v1_http_token,omitempty"`
+
+ GceHTTPEndpoint *float64 `json:"gceHttpEndpoint,omitempty" tf:"gce_http_endpoint,omitempty"`
+
+ GceHTTPToken *float64 `json:"gceHttpToken,omitempty" tf:"gce_http_token,omitempty"`
+}
+
+type InstanceTemplateMetadataOptionsObservation struct {
+ AwsV1HTTPEndpoint *float64 `json:"awsV1HttpEndpoint,omitempty" tf:"aws_v1_http_endpoint,omitempty"`
+
+ AwsV1HTTPToken *float64 `json:"awsV1HttpToken,omitempty" tf:"aws_v1_http_token,omitempty"`
+
+ GceHTTPEndpoint *float64 `json:"gceHttpEndpoint,omitempty" tf:"gce_http_endpoint,omitempty"`
+
+ GceHTTPToken *float64 `json:"gceHttpToken,omitempty" tf:"gce_http_token,omitempty"`
+}
+
+type InstanceTemplateMetadataOptionsParameters struct {
+
+ // +kubebuilder:validation:Optional
+ AwsV1HTTPEndpoint *float64 `json:"awsV1HttpEndpoint,omitempty" tf:"aws_v1_http_endpoint,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ AwsV1HTTPToken *float64 `json:"awsV1HttpToken,omitempty" tf:"aws_v1_http_token,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ GceHTTPEndpoint *float64 `json:"gceHttpEndpoint,omitempty" tf:"gce_http_endpoint,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ GceHTTPToken *float64 `json:"gceHttpToken,omitempty" tf:"gce_http_token,omitempty"`
+}
+
+type InstanceTemplateNetworkInterfaceInitParameters struct {
+
+ // List of DNS records. The structure is documented below.
+ DNSRecord []NetworkInterfaceDNSRecordInitParameters `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"`
+
+ // Manually set static IP address.
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // True if an IPv4 address is allocated for the network interface.
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // Manually set static IPv6 address.
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // List of IPv6 DNS records. The structure is documented below.
+ IPv6DNSRecord []NetworkInterfaceIPv6DNSRecordInitParameters `json:"ipv6DnsRecord,omitempty" tf:"ipv6_dns_record,omitempty"`
+
+ // Flag for using NAT.
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // List of NAT DNS records. The structure is documented below.
+ NATDNSRecord []NetworkInterfaceNATDNSRecordInitParameters `json:"natDnsRecord,omitempty" tf:"nat_dns_record,omitempty"`
+
+ // A public address that can be used to access the internet over NAT. Use variables to set it.
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ // The ID of the network.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+ // Reference to a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+ // Selector for a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+ // Security group IDs for the network interface.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+ // The IDs of the subnets to attach this interface to.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +listType=set
+ SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+
+ // References to Subnet in vpc to populate subnetIds.
+ // +kubebuilder:validation:Optional
+ SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of Subnet in vpc to populate subnetIds.
+ // +kubebuilder:validation:Optional
+ SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
+}
+
+type InstanceTemplateNetworkInterfaceObservation struct {
+
+ // List of DNS records. The structure is documented below.
+ DNSRecord []NetworkInterfaceDNSRecordObservation `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"`
+
+ // Manually set static IP address.
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // True if an IPv4 address is allocated for the network interface.
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // Manually set static IPv6 address.
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // List of IPv6 DNS records. The structure is documented below.
+ IPv6DNSRecord []NetworkInterfaceIPv6DNSRecordObservation `json:"ipv6DnsRecord,omitempty" tf:"ipv6_dns_record,omitempty"`
+
+ // Flag for using NAT.
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // List of NAT DNS records. The structure is documented below.
+ NATDNSRecord []NetworkInterfaceNATDNSRecordObservation `json:"natDnsRecord,omitempty" tf:"nat_dns_record,omitempty"`
+
+ // A public address that can be used to access the internet over NAT. Use variables to set it.
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ // The ID of the network.
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+ // Security group IDs for the network interface.
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // The IDs of the subnets to attach this interface to.
+ // +listType=set
+ SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+}
+
+type InstanceTemplateNetworkInterfaceParameters struct {
+
+ // List of DNS records. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ DNSRecord []NetworkInterfaceDNSRecordParameters `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"`
+
+ // Manually set static IP address.
+ // +kubebuilder:validation:Optional
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // True if an IPv4 address is allocated for the network interface.
+ // +kubebuilder:validation:Optional
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // Manually set static IPv6 address.
+ // +kubebuilder:validation:Optional
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // List of IPv6 DNS records. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ IPv6DNSRecord []NetworkInterfaceIPv6DNSRecordParameters `json:"ipv6DnsRecord,omitempty" tf:"ipv6_dns_record,omitempty"`
+
+ // Flag for using NAT.
+ // +kubebuilder:validation:Optional
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // List of NAT DNS records. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ NATDNSRecord []NetworkInterfaceNATDNSRecordParameters `json:"natDnsRecord,omitempty" tf:"nat_dns_record,omitempty"`
+
+ // A public address that can be used to access the internet over NAT. Use variables to set it.
+ // +kubebuilder:validation:Optional
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ // The ID of the network.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+ // +kubebuilder:validation:Optional
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+ // Reference to a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+ // Selector for a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+ // Security group IDs for the network interface.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+ // The IDs of the subnets to attach this interface to.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+
+ // References to Subnet in vpc to populate subnetIds.
+ // +kubebuilder:validation:Optional
+ SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of Subnet in vpc to populate subnetIds.
+ // +kubebuilder:validation:Optional
+ SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
+}
+
+type InstanceTemplateObservation struct {
+
+ // Boot disk specifications for the instance. The structure is documented below.
+ BootDisk []InstanceTemplateBootDiskObservation `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+ // A description of the instance.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of filesystems to attach to the instance. The structure is documented below.
+ Filesystem []InstanceTemplateFilesystemObservation `json:"filesystem,omitempty" tf:"filesystem,omitempty"`
+
+ // Hostname template for the instance.
+ // This field is used to generate the FQDN value of the instance.
+ // The hostname must be unique within the network and region.
+ // If not specified, the hostname will be equal to the id of the instance
+ // and the FQDN will be <id>.auto.internal. Otherwise the FQDN will be <hostname>.<region_id>.internal.
+ // In order to be unique it must contain at least one of the instance unique placeholders:
+ // {instance.short_id}
+ // {instance.index}
+ // combination of {instance.zone_id} and {instance.index_in_zone}
+ // Example: my-instance-{instance.index}
+ // If not set, the name value will be used.
+ // It may also contain other placeholders; see the metadata doc for the full list.
+ Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // A set of metadata key/value pairs to make available from within the instance.
+ // +mapType=granular
+ Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+ // Options that allow the user to configure access to managed instance metadata.
+ MetadataOptions []InstanceTemplateMetadataOptionsObservation `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"`
+
+ // Name template of the instance.
+ // In order to be unique it must contain at least one of the instance unique placeholders:
+ // {instance.short_id}
+ // {instance.index}
+ // combination of {instance.zone_id} and {instance.index_in_zone}
+ // Example: my-instance-{instance.index}
+ // If not set, the default is used: {instance_group.id}-{instance.short_id}
+ // It may also contain other placeholders; see the metadata doc for the full list.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below.
+ NetworkInterface []InstanceTemplateNetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+ // Network acceleration type for the instance. The structure is documented below.
+ NetworkSettings []NetworkSettingsObservation `json:"networkSettings,omitempty" tf:"network_settings,omitempty"`
+
+ // The placement policy configuration. The structure is documented below.
+ PlacementPolicy []InstanceTemplatePlacementPolicyObservation `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+ // The ID of the hardware platform configuration for the instance. The default is 'standard-v1'.
+ PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+ // Compute resource specifications for the instance. The structure is documented below.
+ Resources []InstanceTemplateResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+
+ // The scheduling policy configuration. The structure is documented below.
+ SchedulingPolicy []InstanceTemplateSchedulingPolicyObservation `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+
+ // A list of disks to attach to the instance. The structure is documented below.
+ SecondaryDisk []InstanceTemplateSecondaryDiskObservation `json:"secondaryDisk,omitempty" tf:"secondary_disk,omitempty"`
+
+ // The ID of the service account authorized for this instance.
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+}
+
+type InstanceTemplateParameters struct {
+
+ // Boot disk specifications for the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ BootDisk []InstanceTemplateBootDiskParameters `json:"bootDisk" tf:"boot_disk,omitempty"`
+
+ // A description of the instance.
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of filesystems to attach to the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Filesystem []InstanceTemplateFilesystemParameters `json:"filesystem,omitempty" tf:"filesystem,omitempty"`
+
+ // Hostname template for the instance.
+ // This field is used to generate the FQDN value of the instance.
+ // The hostname must be unique within the network and region.
+ // If not specified, the hostname will be equal to the id of the instance
+ // and the FQDN will be <id>.auto.internal. Otherwise the FQDN will be <hostname>.<region_id>.internal.
+ // In order to be unique it must contain at least one of the instance unique placeholders:
+ // {instance.short_id}
+ // {instance.index}
+ // combination of {instance.zone_id} and {instance.index_in_zone}
+ // Example: my-instance-{instance.index}
+ // If not set, the name value will be used.
+ // It may also contain other placeholders; see the metadata doc for the full list.
+ // +kubebuilder:validation:Optional
+ Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"`
+
+ // A set of key/value label pairs to assign to the instance.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // A set of metadata key/value pairs to make available from within the instance.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+ // Options that allow the user to configure access to managed instance metadata.
+ // +kubebuilder:validation:Optional
+ MetadataOptions []InstanceTemplateMetadataOptionsParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"`
+
+ // Name template of the instance.
+ // In order to be unique it must contain at least one of the instance unique placeholders:
+ // {instance.short_id}
+ // {instance.index}
+ // combination of {instance.zone_id} and {instance.index_in_zone}
+ // Example: my-instance-{instance.index}
+ // If not set, the default is used: {instance_group.id}-{instance.short_id}
+ // It may also contain other placeholders; see the metadata doc for the full list.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ NetworkInterface []InstanceTemplateNetworkInterfaceParameters `json:"networkInterface" tf:"network_interface,omitempty"`
+
+ // Network acceleration type for the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ NetworkSettings []NetworkSettingsParameters `json:"networkSettings,omitempty" tf:"network_settings,omitempty"`
+
+ // The placement policy configuration. The structure is documented below.
+ // +kubebuilder:validation:Optional + PlacementPolicy []InstanceTemplatePlacementPolicyParameters `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"` + + // The ID of the hardware platform configuration for the instance. The default is 'standard-v1'. + // +kubebuilder:validation:Optional + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + + // Compute resource specifications for the instance. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []InstanceTemplateResourcesParameters `json:"resources" tf:"resources,omitempty"` + + // The scheduling policy configuration. The structure is documented below. + // +kubebuilder:validation:Optional + SchedulingPolicy []InstanceTemplateSchedulingPolicyParameters `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"` + + // A list of disks to attach to the instance. The structure is documented below. + // +kubebuilder:validation:Optional + SecondaryDisk []InstanceTemplateSecondaryDiskParameters `json:"secondaryDisk,omitempty" tf:"secondary_disk,omitempty"` + + // The ID of the service account authorized for this instance. + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type InstanceTemplatePlacementPolicyInitParameters struct { + + // Specifies the id of the Placement Group to assign to the instances. + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` +} + +type InstanceTemplatePlacementPolicyObservation struct { + + // Specifies the id of the Placement Group to assign to the instances. + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` +} + +type InstanceTemplatePlacementPolicyParameters struct { + + // Specifies the id of the Placement Group to assign to the instances. + // +kubebuilder:validation:Optional + PlacementGroupID *string `json:"placementGroupId" tf:"placement_group_id,omitempty"` +} + +type InstanceTemplateResourcesInitParameters struct { + + // If provided, specifies baseline core performance as a percent. + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + + // The number of CPU cores for the instance. + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + + Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"` + + // The memory size in GB. + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type InstanceTemplateResourcesObservation struct { + + // If provided, specifies baseline core performance as a percent. + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + + // The number of CPU cores for the instance. + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + + Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"` + + // The memory size in GB. + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type InstanceTemplateResourcesParameters struct { + + // If provided, specifies baseline core performance as a percent. + // +kubebuilder:validation:Optional + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + + // The number of CPU cores for the instance. + // +kubebuilder:validation:Optional + Cores *float64 `json:"cores" tf:"cores,omitempty"` + + // +kubebuilder:validation:Optional + Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"` + + // The memory size in GB. 
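+ // For example (hypothetical values): cores = 2, memory = 4 and
+ // coreFraction = 20 request two vCPUs at a 20% baseline level with 4 GB of RAM.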
+ // +kubebuilder:validation:Optional
+ Memory *float64 `json:"memory" tf:"memory,omitempty"`
+}
+
+type InstanceTemplateSchedulingPolicyInitParameters struct {
+
+ // Specifies if the instance is preemptible. Defaults to false.
+ Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type InstanceTemplateSchedulingPolicyObservation struct {
+
+ // Specifies if the instance is preemptible. Defaults to false.
+ Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type InstanceTemplateSchedulingPolicyParameters struct {
+
+ // Specifies if the instance is preemptible. Defaults to false.
+ // +kubebuilder:validation:Optional
+ Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type InstanceTemplateSecondaryDiskInitParameters struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the existing disk. Use variables to set it.
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Parameters used for creating a disk alongside the instance. The structure is documented below.
+ InitializeParams []SecondaryDiskInitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+
+ // When set, it can later be used to change the DiskSpec of the actual disk.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type InstanceTemplateSecondaryDiskObservation struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the existing disk. Use variables to set it.
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Parameters used for creating a disk alongside the instance. The structure is documented below.
+ InitializeParams []SecondaryDiskInitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+
+ // When set, it can later be used to change the DiskSpec of the actual disk.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type InstanceTemplateSecondaryDiskParameters struct {
+
+ // This value can be used to reference the device under /dev/disk/by-id/.
+ // +kubebuilder:validation:Optional
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"`
+
+ // ID of the existing disk. Use variables to set it.
+ // +kubebuilder:validation:Optional
+ DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"`
+
+ // Parameters used for creating a disk alongside the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ InitializeParams []SecondaryDiskInitializeParamsParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"`
+
+ // The access mode to the disk resource. By default a disk is attached in READ_WRITE mode.
+ // +kubebuilder:validation:Optional
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+
+ // When set, it can later be used to change the DiskSpec of the actual disk.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type InstancesInitParameters struct {
+}
+
+type InstancesNetworkInterfaceInitParameters struct {
+}
+
+type InstancesNetworkInterfaceObservation struct {
+
+ // The private IP address to assign to the instance. If empty, the address is automatically assigned from the specified subnet.
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"`
+
+ // True if an IPv4 address is allocated for the network interface.
+ IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+ IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+ // Manually set static IPv6 address.
+ IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"`
+
+ // The index of the network interface as generated by the server.
+ Index *float64 `json:"index,omitempty" tf:"index,omitempty"`
+
+ // The MAC address assigned to the network interface.
+ MacAddress *string `json:"macAddress,omitempty" tf:"mac_address,omitempty"`
+
+ // True if the instance has a public address for accessing the internet over NAT.
+ NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+ // The public IP address of the instance.
+ NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"`
+
+ // The IP version for the public address.
+ NATIPVersion *string `json:"natIpVersion,omitempty" tf:"nat_ip_version,omitempty"`
+
+ // The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created.
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+}
+
+type InstancesNetworkInterfaceParameters struct {
+}
+
+type InstancesObservation struct {
+
+ // The Fully Qualified Domain Name.
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+ // The ID of the instance.
+ InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"`
+
+ InstanceTag *string `json:"instanceTag,omitempty" tf:"instance_tag,omitempty"`
+
+ // The name of the managed instance.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // An array with the network interfaces attached to the managed instance.
+ NetworkInterface []InstancesNetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+ // The status of the instance.
+ Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+ StatusChangedAt *string `json:"statusChangedAt,omitempty" tf:"status_changed_at,omitempty"`
+
+ // The status message of the instance.
+ StatusMessage *string `json:"statusMessage,omitempty" tf:"status_message,omitempty"`
+
+ // The ID of the availability zone where the instance resides.
+ ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+}
+
+type InstancesParameters struct {
+}
+
+type LoadBalancerInitParameters struct {
+
+ // Do not wait for load balancer health checks.
+ IgnoreHealthChecks *bool `json:"ignoreHealthChecks,omitempty" tf:"ignore_health_checks,omitempty"`
+
+ // Timeout for waiting for the VM to be checked by the load balancer. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ MaxOpeningTrafficDuration *float64 `json:"maxOpeningTrafficDuration,omitempty" tf:"max_opening_traffic_duration,omitempty"`
+
+ // A description of the target group.
+ TargetGroupDescription *string `json:"targetGroupDescription,omitempty" tf:"target_group_description,omitempty"`
+
+ // A set of key/value label pairs.
+ // +mapType=granular
+ TargetGroupLabels map[string]*string `json:"targetGroupLabels,omitempty" tf:"target_group_labels,omitempty"`
+
+ // The name of the target group.
+ TargetGroupName *string `json:"targetGroupName,omitempty" tf:"target_group_name,omitempty"`
+}
+
+type LoadBalancerObservation struct {
+
+ // Do not wait for load balancer health checks.
+ IgnoreHealthChecks *bool `json:"ignoreHealthChecks,omitempty" tf:"ignore_health_checks,omitempty"`
+
+ // Timeout for waiting for the VM to be checked by the load balancer. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ MaxOpeningTrafficDuration *float64 `json:"maxOpeningTrafficDuration,omitempty" tf:"max_opening_traffic_duration,omitempty"`
+
+ // The status message of the target group.
+ StatusMessage *string `json:"statusMessage,omitempty" tf:"status_message,omitempty"`
+
+ // A description of the target group.
+ TargetGroupDescription *string `json:"targetGroupDescription,omitempty" tf:"target_group_description,omitempty"`
+
+ // The ID of the target group.
+ TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"`
+
+ // A set of key/value label pairs.
+ // +mapType=granular
+ TargetGroupLabels map[string]*string `json:"targetGroupLabels,omitempty" tf:"target_group_labels,omitempty"`
+
+ // The name of the target group.
+ TargetGroupName *string `json:"targetGroupName,omitempty" tf:"target_group_name,omitempty"`
+}
+
+type LoadBalancerParameters struct {
+
+ // Do not wait for load balancer health checks.
+ // +kubebuilder:validation:Optional
+ IgnoreHealthChecks *bool `json:"ignoreHealthChecks,omitempty" tf:"ignore_health_checks,omitempty"`
+
+ // Timeout for waiting for the VM to be checked by the load balancer. If the timeout is exceeded, the VM will be turned off based on the deployment policy. Specified in seconds.
+ // +kubebuilder:validation:Optional
+ MaxOpeningTrafficDuration *float64 `json:"maxOpeningTrafficDuration,omitempty" tf:"max_opening_traffic_duration,omitempty"`
+
+ // A description of the target group.
+ // +kubebuilder:validation:Optional
+ TargetGroupDescription *string `json:"targetGroupDescription,omitempty" tf:"target_group_description,omitempty"`
+
+ // A set of key/value label pairs.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ TargetGroupLabels map[string]*string `json:"targetGroupLabels,omitempty" tf:"target_group_labels,omitempty"`
+
+ // The name of the target group.
+ // +kubebuilder:validation:Optional
+ TargetGroupName *string `json:"targetGroupName,omitempty" tf:"target_group_name,omitempty"`
+}
+
+type NetworkInterfaceDNSRecordInitParameters struct {
+
+ // DNS zone id (if not set, private zone used).
+ DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
+
+ // DNS record fqdn (must have dot at the end).
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+ // When set to true, also create PTR DNS record.
+ Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+ // DNS record TTL.
+ TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"`
+}
+
+type NetworkInterfaceDNSRecordObservation struct {
+
+ // DNS zone id (if not set, private zone used).
+ DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
+
+ // DNS record fqdn (must have dot at the end).
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+ // When set to true, also create PTR DNS record.
+ Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+ // DNS record TTL.
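+ // A minimal sketch (editorial, hypothetical values) of a DNS record entry
+ // within a networkInterface block; note the trailing dot on the FQDN:
+ //   dnsRecord:
+ //   - fqdn: my-vm.example.com.
+ //     ptr: true
+ //     ttl: 300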
+ TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceDNSRecordParameters struct { + + // DNS zone id (if not set, private zone used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceIPv6DNSRecordInitParameters struct { + + // DNS zone id (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceIPv6DNSRecordObservation struct { + + // DNS zone id (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceIPv6DNSRecordParameters struct { + + // DNS zone id (if not set, private zone used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceNATDNSRecordInitParameters struct { + + // DNS zone id (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceNATDNSRecordObservation struct { + + // DNS zone id (if not set, private zone used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkInterfaceNATDNSRecordParameters struct { + + // DNS zone id (if not set, private zone used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record fqdn (must have dot at the end). 
+ // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type NetworkSettingsInitParameters struct { + + // Network acceleration type. By default a network is in STANDARD mode. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type NetworkSettingsObservation struct { + + // Network acceleration type. By default a network is in STANDARD mode. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type NetworkSettingsParameters struct { + + // Network acceleration type. By default a network is in STANDARD mode. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ScalePolicyInitParameters struct { + + // The auto scaling policy of the instance group. The structure is documented below. + AutoScale []AutoScaleInitParameters `json:"autoScale,omitempty" tf:"auto_scale,omitempty"` + + // The fixed scaling policy of the instance group. The structure is documented below. + FixedScale []FixedScaleInitParameters `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"` + + // The test auto scaling policy of the instance group. Use it to test how the auto scale works. The structure is documented below. + TestAutoScale []TestAutoScaleInitParameters `json:"testAutoScale,omitempty" tf:"test_auto_scale,omitempty"` +} + +type ScalePolicyObservation struct { + + // The auto scaling policy of the instance group. The structure is documented below. + AutoScale []AutoScaleObservation `json:"autoScale,omitempty" tf:"auto_scale,omitempty"` + + // The fixed scaling policy of the instance group. The structure is documented below. + FixedScale []FixedScaleObservation `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"` + + // The test auto scaling policy of the instance group. Use it to test how the auto scale works. The structure is documented below. + TestAutoScale []TestAutoScaleObservation `json:"testAutoScale,omitempty" tf:"test_auto_scale,omitempty"` +} + +type ScalePolicyParameters struct { + + // The auto scaling policy of the instance group. The structure is documented below. + // +kubebuilder:validation:Optional + AutoScale []AutoScaleParameters `json:"autoScale,omitempty" tf:"auto_scale,omitempty"` + + // The fixed scaling policy of the instance group. The structure is documented below. + // +kubebuilder:validation:Optional + FixedScale []FixedScaleParameters `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"` + + // The test auto scaling policy of the instance group. Use it to test how the auto scale works. The structure is documented below. + // +kubebuilder:validation:Optional + TestAutoScale []TestAutoScaleParameters `json:"testAutoScale,omitempty" tf:"test_auto_scale,omitempty"` +} + +type SecondaryDiskInitializeParamsInitParameters struct { + + // A description of the instance. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The disk image to initialize this disk from. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // The size of the disk in GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The snapshot to initialize this disk from. 
+	SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+
+	// The type of the disk.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type SecondaryDiskInitializeParamsObservation struct {
+
+	// A description of the instance.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The disk image to initialize this disk from.
+	ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+	// The size of the disk in GB.
+	Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+	// The snapshot to initialize this disk from.
+	SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+
+	// The type of the disk.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type SecondaryDiskInitializeParamsParameters struct {
+
+	// A description of the instance.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The disk image to initialize this disk from.
+	// +kubebuilder:validation:Optional
+	ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"`
+
+	// The size of the disk in GB.
+	// +kubebuilder:validation:Optional
+	Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+	// The snapshot to initialize this disk from.
+	// +kubebuilder:validation:Optional
+	SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+
+	// The type of the disk.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type TCPOptionsInitParameters struct {
+
+	// The port used for TCP health checks.
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+}
+
+type TCPOptionsObservation struct {
+
+	// The port used for TCP health checks.
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+}
+
+type TCPOptionsParameters struct {
+
+	// The port used for TCP health checks.
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port" tf:"port,omitempty"`
+}
+
+type TestAutoScaleCustomRuleInitParameters struct {
+
+	// The ID of the folder that the resources belong to.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// A set of key/value label pairs to assign to the instance group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The name of the metric.
+	MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"`
+
+	// Metric type, GAUGE or COUNTER.
+	MetricType *string `json:"metricType,omitempty" tf:"metric_type,omitempty"`
+
+	// Rule type: UTILIZATION - This type means that the metric applies to one instance. First, Instance Groups calculates the average metric value for each instance, then averages the values for instances in one availability zone. This type of metric must have the instance_id label. WORKLOAD - This type means that the metric applies to instances in one availability zone. This type of metric must have the zone_id label.
+	RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"`
+
+	// Service of custom metric in Yandex Monitoring that should be used for scaling.
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+
+	// Target metric value level.
+	Target *float64 `json:"target,omitempty" tf:"target,omitempty"`
+}
+
+type TestAutoScaleCustomRuleObservation struct {
+
+	// The ID of the folder that the resources belong to.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// A set of key/value label pairs to assign to the instance group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The name of the metric.
+	MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"`
+
+	// Metric type, GAUGE or COUNTER.
+	MetricType *string `json:"metricType,omitempty" tf:"metric_type,omitempty"`
+
+	// Rule type: UTILIZATION - This type means that the metric applies to one instance. First, Instance Groups calculates the average metric value for each instance, then averages the values for instances in one availability zone. This type of metric must have the instance_id label. WORKLOAD - This type means that the metric applies to instances in one availability zone. This type of metric must have the zone_id label.
+	RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"`
+
+	// Service of custom metric in Yandex Monitoring that should be used for scaling.
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+
+	// Target metric value level.
+	Target *float64 `json:"target,omitempty" tf:"target,omitempty"`
+}
+
+type TestAutoScaleCustomRuleParameters struct {
+
+	// The ID of the folder that the resources belong to.
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// A set of key/value label pairs to assign to the instance group.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The name of the metric.
+	// +kubebuilder:validation:Optional
+	MetricName *string `json:"metricName" tf:"metric_name,omitempty"`
+
+	// Metric type, GAUGE or COUNTER.
+	// +kubebuilder:validation:Optional
+	MetricType *string `json:"metricType" tf:"metric_type,omitempty"`
+
+	// Rule type: UTILIZATION - This type means that the metric applies to one instance. First, Instance Groups calculates the average metric value for each instance, then averages the values for instances in one availability zone. This type of metric must have the instance_id label. WORKLOAD - This type means that the metric applies to instances in one availability zone. This type of metric must have the zone_id label.
+	// +kubebuilder:validation:Optional
+	RuleType *string `json:"ruleType" tf:"rule_type,omitempty"`
+
+	// Service of custom metric in Yandex Monitoring that should be used for scaling.
+	// +kubebuilder:validation:Optional
+	Service *string `json:"service,omitempty" tf:"service,omitempty"`
+
+	// Target metric value level.
+	// +kubebuilder:validation:Optional
+	Target *float64 `json:"target" tf:"target,omitempty"`
+}
+
+type TestAutoScaleInitParameters struct {
+
+	// Autoscale type, can be ZONAL or REGIONAL. By default ZONAL type is used.
+	AutoScaleType *string `json:"autoScaleType,omitempty" tf:"auto_scale_type,omitempty"`
+
+	// Target CPU load level.
+	CPUUtilizationTarget *float64 `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
+
+	// A list of custom rules. The structure is documented below.
+	CustomRule []TestAutoScaleCustomRuleInitParameters `json:"customRule,omitempty" tf:"custom_rule,omitempty"`
+
+	// The initial number of instances in the instance group.
+	InitialSize *float64 `json:"initialSize,omitempty" tf:"initial_size,omitempty"`
+
+	// The maximum number of virtual machines in the group.
+	MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+	// The amount of time, in seconds, that metrics are averaged for. If the average value at the end of the interval is higher than the cpu_utilization_target, the instance group will increase the number of virtual machines in the group.
+	MeasurementDuration *float64 `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+
+	// The minimum number of virtual machines in a single availability zone.
+	MinZoneSize *float64 `json:"minZoneSize,omitempty" tf:"min_zone_size,omitempty"`
+
+	// The minimum time interval, in seconds, to monitor the load before an instance group can reduce the number of virtual machines in the group. During this time, the group will not decrease even if the average load falls below the value of cpu_utilization_target.
+	StabilizationDuration *float64 `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+
+	// The warm-up time of the virtual machine, in seconds. During this time, traffic is fed to the virtual machine, but load metrics are not taken into account.
+	WarmupDuration *float64 `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+}
+
+type TestAutoScaleObservation struct {
+
+	// Autoscale type, can be ZONAL or REGIONAL. By default ZONAL type is used.
+	AutoScaleType *string `json:"autoScaleType,omitempty" tf:"auto_scale_type,omitempty"`
+
+	// Target CPU load level.
+	CPUUtilizationTarget *float64 `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
+
+	// A list of custom rules. The structure is documented below.
+	CustomRule []TestAutoScaleCustomRuleObservation `json:"customRule,omitempty" tf:"custom_rule,omitempty"`
+
+	// The initial number of instances in the instance group.
+	InitialSize *float64 `json:"initialSize,omitempty" tf:"initial_size,omitempty"`
+
+	// The maximum number of virtual machines in the group.
+	MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+	// The amount of time, in seconds, that metrics are averaged for. If the average value at the end of the interval is higher than the cpu_utilization_target, the instance group will increase the number of virtual machines in the group.
+	MeasurementDuration *float64 `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+
+	// The minimum number of virtual machines in a single availability zone.
+	MinZoneSize *float64 `json:"minZoneSize,omitempty" tf:"min_zone_size,omitempty"`
+
+	// The minimum time interval, in seconds, to monitor the load before an instance group can reduce the number of virtual machines in the group. During this time, the group will not decrease even if the average load falls below the value of cpu_utilization_target.
+	StabilizationDuration *float64 `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+
+	// The warm-up time of the virtual machine, in seconds. During this time, traffic is fed to the virtual machine, but load metrics are not taken into account.
+	WarmupDuration *float64 `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+}
+
+type TestAutoScaleParameters struct {
+
+	// Autoscale type, can be ZONAL or REGIONAL. By default ZONAL type is used.
+ // +kubebuilder:validation:Optional + AutoScaleType *string `json:"autoScaleType,omitempty" tf:"auto_scale_type,omitempty"` + + // Target CPU load level. + // +kubebuilder:validation:Optional + CPUUtilizationTarget *float64 `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"` + + // A list of custom rules. The structure is documented below. + // +kubebuilder:validation:Optional + CustomRule []TestAutoScaleCustomRuleParameters `json:"customRule,omitempty" tf:"custom_rule,omitempty"` + + // The initial number of instances in the instance group. + // +kubebuilder:validation:Optional + InitialSize *float64 `json:"initialSize" tf:"initial_size,omitempty"` + + // The maximum number of virtual machines in the group. + // +kubebuilder:validation:Optional + MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"` + + // The amount of time, in seconds, that metrics are averaged for. If the average value at the end of the interval is higher than the cpu_utilization_target, the instance group will increase the number of virtual machines in the group. + // +kubebuilder:validation:Optional + MeasurementDuration *float64 `json:"measurementDuration" tf:"measurement_duration,omitempty"` + + // The minimum number of virtual machines in a single availability zone. + // +kubebuilder:validation:Optional + MinZoneSize *float64 `json:"minZoneSize,omitempty" tf:"min_zone_size,omitempty"` + + // The minimum time interval, in seconds, to monitor the load before an instance group can reduce the number of virtual machines in the group. During this time, the group will not decrease even if the average load falls below the value of cpu_utilization_target. + // +kubebuilder:validation:Optional + StabilizationDuration *float64 `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"` + + // The warm-up time of the virtual machine, in seconds. During this time, traffic is fed to the virtual machine, but load metrics are not taken into account. + // +kubebuilder:validation:Optional + WarmupDuration *float64 `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"` +} + +// InstanceGroupSpec defines the desired state of InstanceGroup +type InstanceGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceGroupInitParameters `json:"initProvider,omitempty"` +} + +// InstanceGroupStatus defines the observed state of InstanceGroup. +type InstanceGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// InstanceGroup is the Schema for the InstanceGroups API. Manages an Instance group resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type InstanceGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.allocationPolicy) || (has(self.initProvider) && has(self.initProvider.allocationPolicy))",message="spec.forProvider.allocationPolicy is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.deployPolicy) || (has(self.initProvider) && has(self.initProvider.deployPolicy))",message="spec.forProvider.deployPolicy is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceTemplate) || (has(self.initProvider) && has(self.initProvider.instanceTemplate))",message="spec.forProvider.instanceTemplate is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scalePolicy) || (has(self.initProvider) && has(self.initProvider.scalePolicy))",message="spec.forProvider.scalePolicy is a required parameter" + Spec InstanceGroupSpec `json:"spec"` + Status InstanceGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceGroupList contains a list of InstanceGroups +type InstanceGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InstanceGroup `json:"items"` +} + +// Repository type metadata. +var ( + InstanceGroup_Kind = "InstanceGroup" + InstanceGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: InstanceGroup_Kind}.String() + InstanceGroup_KindAPIVersion = InstanceGroup_Kind + "." + CRDGroupVersion.String() + InstanceGroup_GroupVersionKind = CRDGroupVersion.WithKind(InstanceGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&InstanceGroup{}, &InstanceGroupList{}) +} diff --git a/apis/compute/v1alpha1/zz_instanceiambinding_terraformed.go b/apis/compute/v1alpha1/zz_instanceiambinding_terraformed.go new file mode 100755 index 0000000..ade3eec --- /dev/null +++ b/apis/compute/v1alpha1/zz_instanceiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
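+// The methods in this file implement upjet's resource.Terraformed interface.
+// As a rough sketch of the round-trip they perform (illustrative only, not
+// part of the generated contract): GetParameters marshals spec.forProvider
+// through the Terraform-aware JSON parser, so keys come back in Terraform's
+// snake_case form. Assuming a hypothetical binding:
+//
+//	tr := &InstanceIAMBinding{}
+//	id := "fake-instance-id" // illustrative value
+//	tr.Spec.ForProvider.InstanceID = &id
+//	params, err := tr.GetParameters()
+//	// on success, params["instance_id"] == "fake-instance-id"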
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this InstanceIAMBinding
+func (mg *InstanceIAMBinding) GetTerraformResourceType() string {
+	return "yandex_compute_instance_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this InstanceIAMBinding
+func (tr *InstanceIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this InstanceIAMBinding
+func (tr *InstanceIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this InstanceIAMBinding
+func (tr *InstanceIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this InstanceIAMBinding
+func (tr *InstanceIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this InstanceIAMBinding
+func (tr *InstanceIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this InstanceIAMBinding
+func (tr *InstanceIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this InstanceIAMBinding
+func (tr *InstanceIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this InstanceIAMBinding
+func (tr *InstanceIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this InstanceIAMBinding using its observed tfState.
+// returns True if there are any spec changes for the resource.
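+// As a sketch of the expected behavior under illustrative inputs: given
+// observed state attrs := []byte(`{"role":"viewer"}`), an unset
+// spec.forProvider.role is filled in from attrs and LateInitialize reports
+// true, while an already-set role is left untouched (false if nothing else
+// changed). The exact filtering rules live in upjet's GenericLateInitializer.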
+func (tr *InstanceIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *InstanceIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_instanceiambinding_types.go b/apis/compute/v1alpha1/zz_instanceiambinding_types.go new file mode 100755 index 0000000..6fb659b --- /dev/null +++ b/apis/compute/v1alpha1/zz_instanceiambinding_types.go @@ -0,0 +1,117 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InstanceIAMBindingInitParameters struct { + + // ID of the instance to attach the policy to. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_instance_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type InstanceIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ID of the instance to attach the policy to. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_instance_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type InstanceIAMBindingParameters struct { + + // ID of the instance to attach the policy to. + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one yandex_compute_instance_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// InstanceIAMBindingSpec defines the desired state of InstanceIAMBinding +type InstanceIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// InstanceIAMBindingStatus defines the observed state of InstanceIAMBinding. +type InstanceIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// InstanceIAMBinding is the Schema for the InstanceIAMBindings API. Allows management of a single IAM binding for an instance. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type InstanceIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceId) || (has(self.initProvider) && has(self.initProvider.instanceId))",message="spec.forProvider.instanceId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec InstanceIAMBindingSpec `json:"spec"` + Status InstanceIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceIAMBindingList contains a list of InstanceIAMBindings +type InstanceIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InstanceIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + InstanceIAMBinding_Kind = "InstanceIAMBinding" + InstanceIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: InstanceIAMBinding_Kind}.String() + InstanceIAMBinding_KindAPIVersion = InstanceIAMBinding_Kind + "." 
+ CRDGroupVersion.String()
+	InstanceIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(InstanceIAMBinding_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&InstanceIAMBinding{}, &InstanceIAMBindingList{})
+}
diff --git a/apis/compute/v1alpha1/zz_placementgroup_terraformed.go b/apis/compute/v1alpha1/zz_placementgroup_terraformed.go
new file mode 100755
index 0000000..6db56b0
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_placementgroup_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this PlacementGroup
+func (mg *PlacementGroup) GetTerraformResourceType() string {
+	return "yandex_compute_placement_group"
+}
+
+// GetConnectionDetailsMapping for this PlacementGroup
+func (tr *PlacementGroup) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this PlacementGroup
+func (tr *PlacementGroup) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this PlacementGroup
+func (tr *PlacementGroup) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this PlacementGroup
+func (tr *PlacementGroup) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this PlacementGroup
+func (tr *PlacementGroup) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this PlacementGroup
+func (tr *PlacementGroup) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this PlacementGroup
+func (tr *PlacementGroup) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this PlacementGroup
+func (tr *PlacementGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this PlacementGroup using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *PlacementGroup) LateInitialize(attrs []byte) (bool, error) {
+	params := &PlacementGroupParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *PlacementGroup) GetTerraformSchemaVersion() int {
+	return 1
+}
diff --git a/apis/compute/v1alpha1/zz_placementgroup_types.go b/apis/compute/v1alpha1/zz_placementgroup_types.go
new file mode 100755
index 0000000..452f40d
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_placementgroup_types.go
@@ -0,0 +1,164 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type PlacementGroupInitParameters struct {
+
+	// A description of the Placement Group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Folder that the resource belongs to. If value is omitted, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A set of key/value label pairs to assign to the Placement Group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The name of the Placement Group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// A number of partitions in the placement strategy with partitions policy of the Placement Group (conflicts with placement_strategy_spread).
+	PlacementStrategyPartitions *float64 `json:"placementStrategyPartitions,omitempty" tf:"placement_strategy_partitions,omitempty"`
+
+	// A placement strategy with spread policy of the Placement Group. Should be true or unset (conflicts with placement_strategy_partitions).
+	PlacementStrategySpread *bool `json:"placementStrategySpread,omitempty" tf:"placement_strategy_spread,omitempty"`
+}
+
+type PlacementGroupObservation struct {
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// A description of the Placement Group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Folder that the resource belongs to. If value is omitted, the default provider folder is used.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Placement Group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the Placement Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A number of partitions in the placement strategy with partitions policy of the Placement Group (conflicts with placement_strategy_spread). + PlacementStrategyPartitions *float64 `json:"placementStrategyPartitions,omitempty" tf:"placement_strategy_partitions,omitempty"` + + // A placement strategy with spread policy of the Placement Group. Should be true or unset (conflicts with placement_strategy_partitions). + PlacementStrategySpread *bool `json:"placementStrategySpread,omitempty" tf:"placement_strategy_spread,omitempty"` +} + +type PlacementGroupParameters struct { + + // A description of the Placement Group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Placement Group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the Placement Group. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A number of partitions in the placement strategy with partitions policy of the Placement Group (conflicts with placement_strategy_spread). + // +kubebuilder:validation:Optional + PlacementStrategyPartitions *float64 `json:"placementStrategyPartitions,omitempty" tf:"placement_strategy_partitions,omitempty"` + + // A placement strategy with spread policy of the Placement Group. Should be true or unset (conflicts with placement_strategy_partitions). + // +kubebuilder:validation:Optional + PlacementStrategySpread *bool `json:"placementStrategySpread,omitempty" tf:"placement_strategy_spread,omitempty"` +} + +// PlacementGroupSpec defines the desired state of PlacementGroup +type PlacementGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PlacementGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PlacementGroupInitParameters `json:"initProvider,omitempty"` +} + +// PlacementGroupStatus defines the observed state of PlacementGroup. +type PlacementGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PlacementGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// PlacementGroup is the Schema for the PlacementGroups API. Manages a Placement group resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type PlacementGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PlacementGroupSpec `json:"spec"` + Status PlacementGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PlacementGroupList contains a list of PlacementGroups +type PlacementGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PlacementGroup `json:"items"` +} + +// Repository type metadata. +var ( + PlacementGroup_Kind = "PlacementGroup" + PlacementGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PlacementGroup_Kind}.String() + PlacementGroup_KindAPIVersion = PlacementGroup_Kind + "." + CRDGroupVersion.String() + PlacementGroup_GroupVersionKind = CRDGroupVersion.WithKind(PlacementGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&PlacementGroup{}, &PlacementGroupList{}) +} diff --git a/apis/compute/v1alpha1/zz_placementgroupiambinding_terraformed.go b/apis/compute/v1alpha1/zz_placementgroupiambinding_terraformed.go new file mode 100755 index 0000000..82bc109 --- /dev/null +++ b/apis/compute/v1alpha1/zz_placementgroupiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
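+// For orientation, a minimal in-Go construction of the managed resource this
+// file backs, with purely illustrative values (the member string follows the
+// usual Yandex IAM "type:id" form, but that format is an assumption here):
+//
+//	role := "viewer"
+//	groupID := "fake-placement-group-id"
+//	member := "userAccount:fake-user-id"
+//	pg := &PlacementGroupIAMBinding{
+//		Spec: PlacementGroupIAMBindingSpec{
+//			ForProvider: PlacementGroupIAMBindingParameters{
+//				PlacementGroupID: &groupID,
+//				Role:             &role,
+//				Members:          []*string{&member},
+//			},
+//		},
+//	}
+//	_ = pg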
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this PlacementGroupIAMBinding
+func (mg *PlacementGroupIAMBinding) GetTerraformResourceType() string {
+	return "yandex_compute_placement_group_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this PlacementGroupIAMBinding
+func (tr *PlacementGroupIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this PlacementGroupIAMBinding using its observed tfState.
+// returns True if there are any spec changes for the resource.
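+// Note that the initializer below is built with
+// resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard), which,
+// for every field (the wildcard), skips late-initializing from an observed
+// value that is the zero value when the field's JSON tag carries omitempty;
+// e.g., an empty "role" in tfState would not be copied into an unset
+// spec.forProvider.role (an illustrative reading of the option's behavior).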
+func (tr *PlacementGroupIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &PlacementGroupIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PlacementGroupIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_placementgroupiambinding_types.go b/apis/compute/v1alpha1/zz_placementgroupiambinding_types.go new file mode 100755 index 0000000..9e90c79 --- /dev/null +++ b/apis/compute/v1alpha1/zz_placementgroupiambinding_types.go @@ -0,0 +1,117 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PlacementGroupIAMBindingInitParameters struct { + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the placement group to attach the policy to. + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` + + // The role that should be assigned. Only one yandex_compute_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type PlacementGroupIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the placement group to attach the policy to. + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` + + // The role that should be assigned. Only one yandex_compute_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type PlacementGroupIAMBindingParameters struct { + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the placement group to attach the policy to. + // +kubebuilder:validation:Optional + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` + + // The role that should be assigned. Only one yandex_compute_placement_group_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// PlacementGroupIAMBindingSpec defines the desired state of PlacementGroupIAMBinding +type PlacementGroupIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PlacementGroupIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PlacementGroupIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// PlacementGroupIAMBindingStatus defines the observed state of PlacementGroupIAMBinding. +type PlacementGroupIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PlacementGroupIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// PlacementGroupIAMBinding is the Schema for the PlacementGroupIAMBindings API. Allows management of a single IAM binding for a Placement Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type PlacementGroupIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.placementGroupId) || (has(self.initProvider) && has(self.initProvider.placementGroupId))",message="spec.forProvider.placementGroupId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec PlacementGroupIAMBindingSpec `json:"spec"` + Status PlacementGroupIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PlacementGroupIAMBindingList contains a list of PlacementGroupIAMBindings +type PlacementGroupIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PlacementGroupIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + PlacementGroupIAMBinding_Kind = "PlacementGroupIAMBinding" + PlacementGroupIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PlacementGroupIAMBinding_Kind}.String() + PlacementGroupIAMBinding_KindAPIVersion = PlacementGroupIAMBinding_Kind + "." 
+	PlacementGroupIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(PlacementGroupIAMBinding_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&PlacementGroupIAMBinding{}, &PlacementGroupIAMBindingList{})
+}
diff --git a/apis/compute/v1alpha1/zz_snapshot_terraformed.go b/apis/compute/v1alpha1/zz_snapshot_terraformed.go
new file mode 100755
index 0000000..948dc07
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshot_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Snapshot
+func (mg *Snapshot) GetTerraformResourceType() string {
+	return "yandex_compute_snapshot"
+}
+
+// GetConnectionDetailsMapping for this Snapshot
+func (tr *Snapshot) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Snapshot
+func (tr *Snapshot) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Snapshot
+func (tr *Snapshot) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Snapshot
+func (tr *Snapshot) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Snapshot
+func (tr *Snapshot) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Snapshot
+func (tr *Snapshot) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Snapshot
+func (tr *Snapshot) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Snapshot
+func (tr *Snapshot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
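+	// For illustration only (editor's sketch, not part of the generated
+	// code): with Overwrite reset to false, values already present in
+	// params win and only unset keys are filled in from initParams, e.g.
+	//
+	//	dst := map[string]any{"name": "a"}
+	//	src := map[string]any{"name": "b", "labels": map[string]any{"env": "dev"}}
+	//	_ = mergo.Merge(&dst, src, mergo.WithSliceDeepCopy,
+	//		func(c *mergo.Config) { c.Overwrite = false })
+	//	// dst == map[string]any{"name": "a", "labels": map[string]any{"env": "dev"}}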
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Snapshot using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *Snapshot) LateInitialize(attrs []byte) (bool, error) {
+	params := &SnapshotParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Snapshot) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/compute/v1alpha1/zz_snapshot_types.go b/apis/compute/v1alpha1/zz_snapshot_types.go
new file mode 100755
index 0000000..e5cd568
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshot_types.go
@@ -0,0 +1,256 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type SnapshotHardwareGenerationGeneration2FeaturesInitParameters struct {
+}
+
+type SnapshotHardwareGenerationGeneration2FeaturesObservation struct {
+}
+
+type SnapshotHardwareGenerationGeneration2FeaturesParameters struct {
+}
+
+type SnapshotHardwareGenerationInitParameters struct {
+
+	// A newer hardware generation, which always uses PCI_TOPOLOGY_V2 and UEFI boot.
+	Generation2Features []SnapshotHardwareGenerationGeneration2FeaturesInitParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"`
+
+	// Defines the first known hardware generation and its features, which are:
+	LegacyFeatures []SnapshotHardwareGenerationLegacyFeaturesInitParameters `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"`
+}
+
+type SnapshotHardwareGenerationLegacyFeaturesInitParameters struct {
+
+	// A variant of PCI topology, one of PCI_TOPOLOGY_V1 or PCI_TOPOLOGY_V2.
+	PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"`
+}
+
+type SnapshotHardwareGenerationLegacyFeaturesObservation struct {
+
+	// A variant of PCI topology, one of PCI_TOPOLOGY_V1 or PCI_TOPOLOGY_V2.
+	PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"`
+}
+
+type SnapshotHardwareGenerationLegacyFeaturesParameters struct {
+
+	// A variant of PCI topology, one of PCI_TOPOLOGY_V1 or PCI_TOPOLOGY_V2.
+	// +kubebuilder:validation:Optional
+	PciTopology *string `json:"pciTopology,omitempty" tf:"pci_topology,omitempty"`
+}
+
+type SnapshotHardwareGenerationObservation struct {
+
+	// A newer hardware generation, which always uses PCI_TOPOLOGY_V2 and UEFI boot.
+	Generation2Features []SnapshotHardwareGenerationGeneration2FeaturesParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"`
+
+	// Defines the first known hardware generation and its features, which are:
+	LegacyFeatures []SnapshotHardwareGenerationLegacyFeaturesObservation `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"`
+}
+
+type SnapshotHardwareGenerationParameters struct {
+
+	// A newer hardware generation, which always uses PCI_TOPOLOGY_V2 and UEFI boot.
+	// +kubebuilder:validation:Optional
+	Generation2Features []SnapshotHardwareGenerationGeneration2FeaturesParameters `json:"generation2Features,omitempty" tf:"generation2_features,omitempty"`
+
+	// Defines the first known hardware generation and its features, which are:
+	// +kubebuilder:validation:Optional
+	LegacyFeatures []SnapshotHardwareGenerationLegacyFeaturesParameters `json:"legacyFeatures,omitempty" tf:"legacy_features,omitempty"`
+}
+
+type SnapshotInitParameters struct {
+
+	// Description of the resource.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Hardware generation and its features,
+	// which will be applied to the instance when this snapshot is used as a boot
+	// disk source. Provide this property if you wish to override this value, which
+	// otherwise is inherited from the source. The structure is documented below.
+	HardwareGeneration []SnapshotHardwareGenerationInitParameters `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"`
+
+	// A set of key/value label pairs to assign to the snapshot.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// A name for the resource.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the disk to create a snapshot from.
+	// +crossplane:generate:reference:type=Disk
+	SourceDiskID *string `json:"sourceDiskId,omitempty" tf:"source_disk_id,omitempty"`
+
+	// Reference to a Disk to populate sourceDiskId.
+	// +kubebuilder:validation:Optional
+	SourceDiskIDRef *v1.Reference `json:"sourceDiskIdRef,omitempty" tf:"-"`
+
+	// Selector for a Disk to populate sourceDiskId.
+	// +kubebuilder:validation:Optional
+	SourceDiskIDSelector *v1.Selector `json:"sourceDiskIdSelector,omitempty" tf:"-"`
+}
+
+type SnapshotObservation struct {
+
+	// Creation timestamp of the snapshot.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// Description of the resource.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Size of the disk when the snapshot was created, specified in GB.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Hardware generation and its features,
+	// which will be applied to the instance when this snapshot is used as a boot
+	// disk source. Provide this property if you wish to override this value, which
+	// otherwise is inherited from the source. The structure is documented below.
+	HardwareGeneration []SnapshotHardwareGenerationObservation `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// A set of key/value label pairs to assign to the snapshot.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// A name for the resource.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the disk to create a snapshot from.
+	SourceDiskID *string `json:"sourceDiskId,omitempty" tf:"source_disk_id,omitempty"`
+
+	// Size of the snapshot, specified in GB.
+	StorageSize *float64 `json:"storageSize,omitempty" tf:"storage_size,omitempty"`
+}
+
+type SnapshotParameters struct {
+
+	// Description of the resource.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Hardware generation and its features,
+	// which will be applied to the instance when this snapshot is used as a boot
+	// disk source. Provide this property if you wish to override this value, which
+	// otherwise is inherited from the source. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	HardwareGeneration []SnapshotHardwareGenerationParameters `json:"hardwareGeneration,omitempty" tf:"hardware_generation,omitempty"`
+
+	// A set of key/value label pairs to assign to the snapshot.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// A name for the resource.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the disk to create a snapshot from.
+	// +crossplane:generate:reference:type=Disk
+	// +kubebuilder:validation:Optional
+	SourceDiskID *string `json:"sourceDiskId,omitempty" tf:"source_disk_id,omitempty"`
+
+	// Reference to a Disk to populate sourceDiskId.
+	// +kubebuilder:validation:Optional
+	SourceDiskIDRef *v1.Reference `json:"sourceDiskIdRef,omitempty" tf:"-"`
+
+	// Selector for a Disk to populate sourceDiskId.
+	// +kubebuilder:validation:Optional
+	SourceDiskIDSelector *v1.Selector `json:"sourceDiskIdSelector,omitempty" tf:"-"`
+}
+
+// SnapshotSpec defines the desired state of Snapshot
+type SnapshotSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider SnapshotParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider SnapshotInitParameters `json:"initProvider,omitempty"`
+}
+
+// SnapshotStatus defines the observed state of Snapshot.
+type SnapshotStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider SnapshotObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Snapshot is the Schema for the Snapshots API. Creates a new snapshot of a disk.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type Snapshot struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec SnapshotSpec `json:"spec"`
+	Status SnapshotStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SnapshotList contains a list of Snapshots
+type SnapshotList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []Snapshot `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	Snapshot_Kind = "Snapshot"
+	Snapshot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Snapshot_Kind}.String()
+	Snapshot_KindAPIVersion = Snapshot_Kind + "." + CRDGroupVersion.String()
+	Snapshot_GroupVersionKind = CRDGroupVersion.WithKind(Snapshot_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Snapshot{}, &SnapshotList{})
+}
diff --git a/apis/compute/v1alpha1/zz_snapshotiambinding_terraformed.go b/apis/compute/v1alpha1/zz_snapshotiambinding_terraformed.go
new file mode 100755
index 0000000..6bf6283
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshotiambinding_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
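+// For illustration only (not part of the generated code): the resource type
+// string returned by GetTerraformResourceType below is the key upjet uses
+// when rendering the Terraform JSON configuration, roughly of the shape
+//
+//	{"resource": {"yandex_compute_snapshot_iam_binding": {"<name>": {...}}}}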
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this SnapshotIAMBinding
+func (mg *SnapshotIAMBinding) GetTerraformResourceType() string {
+	return "yandex_compute_snapshot_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SnapshotIAMBinding
+func (tr *SnapshotIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SnapshotIAMBinding using its observed tfState.
+// returns true if there are any spec changes for the resource.
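+// For illustration only (editor's sketch, not part of the generated code):
+// if the observed Terraform state carries role = "viewer" while
+// spec.forProvider.role is nil, the generic late-initializer below copies
+// the value into the spec and returns true, so the controller persists the
+// updated spec; when every observed field is already set, it returns false.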
+func (tr *SnapshotIAMBinding) LateInitialize(attrs []byte) (bool, error) {
+	params := &SnapshotIAMBindingParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SnapshotIAMBinding) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/compute/v1alpha1/zz_snapshotiambinding_types.go b/apis/compute/v1alpha1/zz_snapshotiambinding_types.go
new file mode 100755
index 0000000..18afb05
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshotiambinding_types.go
@@ -0,0 +1,117 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type SnapshotIAMBindingInitParameters struct {
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_snapshot_iam_binding can be used per role.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// ID of the snapshot to attach the policy to.
+	SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+}
+
+type SnapshotIAMBindingObservation struct {
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_snapshot_iam_binding can be used per role.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// ID of the snapshot to attach the policy to.
+	SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+}
+
+type SnapshotIAMBindingParameters struct {
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_snapshot_iam_binding can be used per role.
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// ID of the snapshot to attach the policy to.
+	// +kubebuilder:validation:Optional
+	SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"`
+}
+
+// SnapshotIAMBindingSpec defines the desired state of SnapshotIAMBinding
+type SnapshotIAMBindingSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider SnapshotIAMBindingParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider SnapshotIAMBindingInitParameters `json:"initProvider,omitempty"`
+}
+
+// SnapshotIAMBindingStatus defines the observed state of SnapshotIAMBinding.
+type SnapshotIAMBindingStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider SnapshotIAMBindingObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// SnapshotIAMBinding is the Schema for the SnapshotIAMBindings API. Allows management of a single IAM binding for a Snapshot.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type SnapshotIAMBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.snapshotId) || (has(self.initProvider) && has(self.initProvider.snapshotId))",message="spec.forProvider.snapshotId is a required parameter"
+	Spec SnapshotIAMBindingSpec `json:"spec"`
+	Status SnapshotIAMBindingStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SnapshotIAMBindingList contains a list of SnapshotIAMBindings
+type SnapshotIAMBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []SnapshotIAMBinding `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	SnapshotIAMBinding_Kind = "SnapshotIAMBinding"
+	SnapshotIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SnapshotIAMBinding_Kind}.String()
+	SnapshotIAMBinding_KindAPIVersion = SnapshotIAMBinding_Kind + "." + CRDGroupVersion.String()
+	SnapshotIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(SnapshotIAMBinding_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&SnapshotIAMBinding{}, &SnapshotIAMBindingList{})
+}
diff --git a/apis/compute/v1alpha1/zz_snapshotschedule_terraformed.go b/apis/compute/v1alpha1/zz_snapshotschedule_terraformed.go
new file mode 100755
index 0000000..d89c78b
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshotschedule_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this SnapshotSchedule
+func (mg *SnapshotSchedule) GetTerraformResourceType() string {
+	return "yandex_compute_snapshot_schedule"
+}
+
+// GetConnectionDetailsMapping for this SnapshotSchedule
+func (tr *SnapshotSchedule) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this SnapshotSchedule
+func (tr *SnapshotSchedule) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this SnapshotSchedule
+func (tr *SnapshotSchedule) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SnapshotSchedule
+func (tr *SnapshotSchedule) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SnapshotSchedule
+func (tr *SnapshotSchedule) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SnapshotSchedule
+func (tr *SnapshotSchedule) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SnapshotSchedule
+func (tr *SnapshotSchedule) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SnapshotSchedule
+func (tr *SnapshotSchedule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SnapshotSchedule using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *SnapshotSchedule) LateInitialize(attrs []byte) (bool, error) {
+	params := &SnapshotScheduleParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SnapshotSchedule) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/compute/v1alpha1/zz_snapshotschedule_types.go b/apis/compute/v1alpha1/zz_snapshotschedule_types.go
new file mode 100755
index 0000000..bea64e0
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshotschedule_types.go
@@ -0,0 +1,281 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type SchedulePolicyInitParameters struct {
+
+	// Cron expression to schedule snapshots (in cron format "* * * * *").
+	Expression *string `json:"expression,omitempty" tf:"expression,omitempty"`
+
+	// Time to start the snapshot schedule (in format RFC3339 "2006-01-02T15:04:05Z07:00"). If empty, the current time will be used. Unlike an expression that specifies regularity rules, the start_at parameter determines from what point these rules will be applied.
+	StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"`
+}
+
+type SchedulePolicyObservation struct {
+
+	// Cron expression to schedule snapshots (in cron format "* * * * *").
+	Expression *string `json:"expression,omitempty" tf:"expression,omitempty"`
+
+	// Time to start the snapshot schedule (in format RFC3339 "2006-01-02T15:04:05Z07:00"). If empty, the current time will be used. Unlike an expression that specifies regularity rules, the start_at parameter determines from what point these rules will be applied.
+	StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"`
+}
+
+type SchedulePolicyParameters struct {
+
+	// Cron expression to schedule snapshots (in cron format "* * * * *").
+	// +kubebuilder:validation:Optional
+	Expression *string `json:"expression,omitempty" tf:"expression,omitempty"`
+
+	// Time to start the snapshot schedule (in format RFC3339 "2006-01-02T15:04:05Z07:00"). If empty, the current time will be used. Unlike an expression that specifies regularity rules, the start_at parameter determines from what point these rules will be applied.
+	// +kubebuilder:validation:Optional
+	StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"`
+}
+
+type SnapshotScheduleInitParameters struct {
+
+	// Description of the resource.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// IDs of the disk for snapshot schedule.
+	// +crossplane:generate:reference:type=Disk
+	// +listType=set
+	DiskIds []*string `json:"diskIds,omitempty" tf:"disk_ids,omitempty"`
+
+	// References to Disk to populate diskIds.
+	// +kubebuilder:validation:Optional
+	DiskIdsRefs []v1.Reference `json:"diskIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of Disk to populate diskIds.
+	// +kubebuilder:validation:Optional
+	DiskIdsSelector *v1.Selector `json:"diskIdsSelector,omitempty" tf:"-"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A set of key/value label pairs to assign to the snapshot schedule.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// A name for the resource.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Time duration applied to snapshots created by this snapshot schedule. This is a signed sequence of decimal numbers, each with optional fraction and a unit suffix. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Examples: "300ms", "1.5h" or "2h45m".
+	RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`
+
+	// Schedule policy of the snapshot schedule.
+	SchedulePolicy []SchedulePolicyInitParameters `json:"schedulePolicy,omitempty" tf:"schedule_policy,omitempty"`
+
+	// Maximum number of snapshots for every disk of the snapshot schedule.
+	SnapshotCount *float64 `json:"snapshotCount,omitempty" tf:"snapshot_count,omitempty"`
+
+	// Additional attributes for snapshots created by this snapshot schedule.
+	SnapshotSpec []SnapshotSpecInitParameters `json:"snapshotSpec,omitempty" tf:"snapshot_spec,omitempty"`
+}
+
+type SnapshotScheduleObservation struct {
+
+	// Creation timestamp of the snapshot schedule.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// Description of the resource.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// IDs of the disk for snapshot schedule.
+	// +listType=set
+	DiskIds []*string `json:"diskIds,omitempty" tf:"disk_ids,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// A set of key/value label pairs to assign to the snapshot schedule.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// A name for the resource.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Time duration applied to snapshots created by this snapshot schedule. This is a signed sequence of decimal numbers, each with optional fraction and a unit suffix. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Examples: "300ms", "1.5h" or "2h45m".
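+	// (Editor's note, not generated text: this is Go's time.ParseDuration
+	// format, so e.g. time.ParseDuration("2h45m") accepts the last example.)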
+	RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`
+
+	// Schedule policy of the snapshot schedule.
+	SchedulePolicy []SchedulePolicyObservation `json:"schedulePolicy,omitempty" tf:"schedule_policy,omitempty"`
+
+	// Maximum number of snapshots for every disk of the snapshot schedule.
+	SnapshotCount *float64 `json:"snapshotCount,omitempty" tf:"snapshot_count,omitempty"`
+
+	// Additional attributes for snapshots created by this snapshot schedule.
+	SnapshotSpec []SnapshotSpecObservation `json:"snapshotSpec,omitempty" tf:"snapshot_spec,omitempty"`
+
+	// The status of the snapshot schedule.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+}
+
+type SnapshotScheduleParameters struct {
+
+	// Description of the resource.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// IDs of the disk for snapshot schedule.
+	// +crossplane:generate:reference:type=Disk
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	DiskIds []*string `json:"diskIds,omitempty" tf:"disk_ids,omitempty"`
+
+	// References to Disk to populate diskIds.
+	// +kubebuilder:validation:Optional
+	DiskIdsRefs []v1.Reference `json:"diskIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of Disk to populate diskIds.
+	// +kubebuilder:validation:Optional
+	DiskIdsSelector *v1.Selector `json:"diskIdsSelector,omitempty" tf:"-"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A set of key/value label pairs to assign to the snapshot schedule.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// A name for the resource.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Time duration applied to snapshots created by this snapshot schedule. This is a signed sequence of decimal numbers, each with optional fraction and a unit suffix. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Examples: "300ms", "1.5h" or "2h45m".
+	// +kubebuilder:validation:Optional
+	RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`
+
+	// Schedule policy of the snapshot schedule.
+	// +kubebuilder:validation:Optional
+	SchedulePolicy []SchedulePolicyParameters `json:"schedulePolicy,omitempty" tf:"schedule_policy,omitempty"`
+
+	// Maximum number of snapshots for every disk of the snapshot schedule.
+	// +kubebuilder:validation:Optional
+	SnapshotCount *float64 `json:"snapshotCount,omitempty" tf:"snapshot_count,omitempty"`
+
+	// Additional attributes for snapshots created by this snapshot schedule.
+	// +kubebuilder:validation:Optional
+	SnapshotSpec []SnapshotSpecParameters `json:"snapshotSpec,omitempty" tf:"snapshot_spec,omitempty"`
+}
+
+type SnapshotSpecInitParameters struct {
+
+	// Description to assign to snapshots created by this snapshot schedule.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// A set of key/value label pairs to assign to snapshots created by this snapshot schedule.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type SnapshotSpecObservation struct {
+
+	// Description to assign to snapshots created by this snapshot schedule.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// A set of key/value label pairs to assign to snapshots created by this snapshot schedule.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type SnapshotSpecParameters struct {
+
+	// Description to assign to snapshots created by this snapshot schedule.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// A set of key/value label pairs to assign to snapshots created by this snapshot schedule.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+// SnapshotScheduleSpec defines the desired state of SnapshotSchedule
+type SnapshotScheduleSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider SnapshotScheduleParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider SnapshotScheduleInitParameters `json:"initProvider,omitempty"`
+}
+
+// SnapshotScheduleStatus defines the observed state of SnapshotSchedule.
+type SnapshotScheduleStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider SnapshotScheduleObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// SnapshotSchedule is the Schema for the SnapshotSchedules API. Creates a new snapshot schedule.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SnapshotSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SnapshotScheduleSpec `json:"spec"` + Status SnapshotScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SnapshotScheduleList contains a list of SnapshotSchedules +type SnapshotScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SnapshotSchedule `json:"items"` +} + +// Repository type metadata. +var ( + SnapshotSchedule_Kind = "SnapshotSchedule" + SnapshotSchedule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SnapshotSchedule_Kind}.String() + SnapshotSchedule_KindAPIVersion = SnapshotSchedule_Kind + "." + CRDGroupVersion.String() + SnapshotSchedule_GroupVersionKind = CRDGroupVersion.WithKind(SnapshotSchedule_Kind) +) + +func init() { + SchemeBuilder.Register(&SnapshotSchedule{}, &SnapshotScheduleList{}) +} diff --git a/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_terraformed.go b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_terraformed.go new file mode 100755 index 0000000..52d6c04 --- /dev/null +++ b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this SnapshotScheduleIAMBinding
+func (mg *SnapshotScheduleIAMBinding) GetTerraformResourceType() string {
+	return "yandex_compute_snapshot_schedule_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SnapshotScheduleIAMBinding
+func (tr *SnapshotScheduleIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SnapshotScheduleIAMBinding using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *SnapshotScheduleIAMBinding) LateInitialize(attrs []byte) (bool, error) {
+	params := &SnapshotScheduleIAMBindingParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SnapshotScheduleIAMBinding) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_types.go b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_types.go
new file mode 100755
index 0000000..354de9e
--- /dev/null
+++ b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_types.go
@@ -0,0 +1,117 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type SnapshotScheduleIAMBindingInitParameters struct {
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_snapshot_schedule_iam_binding can be used per role.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// ID of the snapshot schedule to attach the policy to.
+	SnapshotScheduleID *string `json:"snapshotScheduleId,omitempty" tf:"snapshot_schedule_id,omitempty"`
+}
+
+type SnapshotScheduleIAMBindingObservation struct {
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_snapshot_schedule_iam_binding can be used per role.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// ID of the snapshot schedule to attach the policy to.
+	SnapshotScheduleID *string `json:"snapshotScheduleId,omitempty" tf:"snapshot_schedule_id,omitempty"`
+}
+
+type SnapshotScheduleIAMBindingParameters struct {
+
+	// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values:
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be assigned. Only one yandex_compute_snapshot_schedule_iam_binding can be used per role.
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// ID of the snapshot schedule to attach the policy to.
+	// +kubebuilder:validation:Optional
+	SnapshotScheduleID *string `json:"snapshotScheduleId,omitempty" tf:"snapshot_schedule_id,omitempty"`
+}
+
+// SnapshotScheduleIAMBindingSpec defines the desired state of SnapshotScheduleIAMBinding
+type SnapshotScheduleIAMBindingSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider SnapshotScheduleIAMBindingParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider SnapshotScheduleIAMBindingInitParameters `json:"initProvider,omitempty"`
+}
+
+// SnapshotScheduleIAMBindingStatus defines the observed state of SnapshotScheduleIAMBinding.
+type SnapshotScheduleIAMBindingStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider SnapshotScheduleIAMBindingObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// SnapshotScheduleIAMBinding is the Schema for the SnapshotScheduleIAMBindings API. Allows management of a single IAM binding for a Snapshot Schedule.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type SnapshotScheduleIAMBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.snapshotScheduleId) || (has(self.initProvider) && has(self.initProvider.snapshotScheduleId))",message="spec.forProvider.snapshotScheduleId is a required parameter"
+	Spec SnapshotScheduleIAMBindingSpec `json:"spec"`
+	Status SnapshotScheduleIAMBindingStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SnapshotScheduleIAMBindingList contains a list of SnapshotScheduleIAMBindings
+type SnapshotScheduleIAMBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []SnapshotScheduleIAMBinding `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	SnapshotScheduleIAMBinding_Kind = "SnapshotScheduleIAMBinding"
+	SnapshotScheduleIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SnapshotScheduleIAMBinding_Kind}.String()
+	SnapshotScheduleIAMBinding_KindAPIVersion = SnapshotScheduleIAMBinding_Kind + "." + CRDGroupVersion.String()
+	SnapshotScheduleIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(SnapshotScheduleIAMBinding_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&SnapshotScheduleIAMBinding{}, &SnapshotScheduleIAMBindingList{})
+}
diff --git a/apis/container/v1alpha1/zz_generated.conversion_hubs.go b/apis/container/v1alpha1/zz_generated.conversion_hubs.go
index 0eec7c9..efbaf2d 100755
--- a/apis/container/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/container/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,25 +1,21 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
+// Hub marks this type as a conversion hub.
+func (tr *Registry) Hub() {}
- // Hub marks this type as a conversion hub.
- func (tr *Registry) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *RegistryIAMBinding) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *RegistryIPPermission) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *RegistryIAMBinding) Hub() {}
- // Hub marks this type as a conversion hub.
- func (tr *Repository) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *RegistryIPPermission) Hub() {}
- // Hub marks this type as a conversion hub.
- func (tr *RepositoryIAMBinding) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *Repository) Hub() {}
- // Hub marks this type as a conversion hub.
- func (tr *RepositoryLifecyclePolicy) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *RepositoryIAMBinding) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *RepositoryLifecyclePolicy) Hub() {}
diff --git a/apis/container/v1alpha1/zz_generated.deepcopy.go b/apis/container/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..aef9c9d
--- /dev/null
+++ b/apis/container/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1568 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Registry) DeepCopyInto(out *Registry) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Registry.
+func (in *Registry) DeepCopy() *Registry {
+	if in == nil {
+		return nil
+	}
+	out := new(Registry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Registry) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryIAMBinding) DeepCopyInto(out *RegistryIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBinding. +func (in *RegistryIAMBinding) DeepCopy() *RegistryIAMBinding { + if in == nil { + return nil + } + out := new(RegistryIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegistryIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIAMBindingInitParameters) DeepCopyInto(out *RegistryIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.RegistryIDRef != nil { + in, out := &in.RegistryIDRef, &out.RegistryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegistryIDSelector != nil { + in, out := &in.RegistryIDSelector, &out.RegistryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBindingInitParameters. +func (in *RegistryIAMBindingInitParameters) DeepCopy() *RegistryIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(RegistryIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIAMBindingList) DeepCopyInto(out *RegistryIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RegistryIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBindingList. +func (in *RegistryIAMBindingList) DeepCopy() *RegistryIAMBindingList { + if in == nil { + return nil + } + out := new(RegistryIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
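+//
+// A hand-written illustration, not generator output: DeepCopyObject is what
+// satisfies k8s.io/apimachinery's runtime.Object, so controllers can clone
+// an object taken from the shared informer cache before mutating it:
+//
+//	var obj runtime.Object = &RegistryIAMBindingList{}
+//	clone := obj.DeepCopyObject().(*RegistryIAMBindingList)
+//	clone.Items = nil // the cached original keeps its items
+//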
+func (in *RegistryIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIAMBindingObservation) DeepCopyInto(out *RegistryIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBindingObservation. +func (in *RegistryIAMBindingObservation) DeepCopy() *RegistryIAMBindingObservation { + if in == nil { + return nil + } + out := new(RegistryIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIAMBindingParameters) DeepCopyInto(out *RegistryIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.RegistryIDRef != nil { + in, out := &in.RegistryIDRef, &out.RegistryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegistryIDSelector != nil { + in, out := &in.RegistryIDSelector, &out.RegistryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBindingParameters. +func (in *RegistryIAMBindingParameters) DeepCopy() *RegistryIAMBindingParameters { + if in == nil { + return nil + } + out := new(RegistryIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryIAMBindingSpec) DeepCopyInto(out *RegistryIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBindingSpec. +func (in *RegistryIAMBindingSpec) DeepCopy() *RegistryIAMBindingSpec { + if in == nil { + return nil + } + out := new(RegistryIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIAMBindingStatus) DeepCopyInto(out *RegistryIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIAMBindingStatus. +func (in *RegistryIAMBindingStatus) DeepCopy() *RegistryIAMBindingStatus { + if in == nil { + return nil + } + out := new(RegistryIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIPPermission) DeepCopyInto(out *RegistryIPPermission) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermission. +func (in *RegistryIPPermission) DeepCopy() *RegistryIPPermission { + if in == nil { + return nil + } + out := new(RegistryIPPermission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegistryIPPermission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIPPermissionInitParameters) DeepCopyInto(out *RegistryIPPermissionInitParameters) { + *out = *in + if in.Pull != nil { + in, out := &in.Pull, &out.Pull + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Push != nil { + in, out := &in.Push, &out.Push + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.RegistryIDRef != nil { + in, out := &in.RegistryIDRef, &out.RegistryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegistryIDSelector != nil { + in, out := &in.RegistryIDSelector, &out.RegistryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermissionInitParameters. 
+func (in *RegistryIPPermissionInitParameters) DeepCopy() *RegistryIPPermissionInitParameters { + if in == nil { + return nil + } + out := new(RegistryIPPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIPPermissionList) DeepCopyInto(out *RegistryIPPermissionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RegistryIPPermission, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermissionList. +func (in *RegistryIPPermissionList) DeepCopy() *RegistryIPPermissionList { + if in == nil { + return nil + } + out := new(RegistryIPPermissionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegistryIPPermissionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIPPermissionObservation) DeepCopyInto(out *RegistryIPPermissionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Pull != nil { + in, out := &in.Pull, &out.Pull + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Push != nil { + in, out := &in.Push, &out.Push + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermissionObservation. +func (in *RegistryIPPermissionObservation) DeepCopy() *RegistryIPPermissionObservation { + if in == nil { + return nil + } + out := new(RegistryIPPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
in must be non-nil. 
+func (in *RegistryIPPermissionParameters) DeepCopyInto(out *RegistryIPPermissionParameters) { + *out = *in + if in.Pull != nil { + in, out := &in.Pull, &out.Pull + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Push != nil { + in, out := &in.Push, &out.Push + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.RegistryIDRef != nil { + in, out := &in.RegistryIDRef, &out.RegistryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegistryIDSelector != nil { + in, out := &in.RegistryIDSelector, &out.RegistryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermissionParameters. +func (in *RegistryIPPermissionParameters) DeepCopy() *RegistryIPPermissionParameters { + if in == nil { + return nil + } + out := new(RegistryIPPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIPPermissionSpec) DeepCopyInto(out *RegistryIPPermissionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermissionSpec. +func (in *RegistryIPPermissionSpec) DeepCopy() *RegistryIPPermissionSpec { + if in == nil { + return nil + } + out := new(RegistryIPPermissionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryIPPermissionStatus) DeepCopyInto(out *RegistryIPPermissionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryIPPermissionStatus. +func (in *RegistryIPPermissionStatus) DeepCopy() *RegistryIPPermissionStatus { + if in == nil { + return nil + } + out := new(RegistryIPPermissionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
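+//
+// A hand-written sketch (ptr is an assumed helper returning the address of
+// its argument): the per-key loop below is why two objects never share
+// *string values through the Labels map, whereas a plain map assignment
+// would alias them:
+//
+//	a := &RegistryInitParameters{Labels: map[string]*string{"env": ptr("dev")}}
+//	b := a.DeepCopy()
+//	*b.Labels["env"] = "prod" // a.Labels["env"] still reads "dev"
+//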
+func (in *RegistryInitParameters) DeepCopyInto(out *RegistryInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryInitParameters. +func (in *RegistryInitParameters) DeepCopy() *RegistryInitParameters { + if in == nil { + return nil + } + out := new(RegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryList) DeepCopyInto(out *RegistryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Registry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryList. +func (in *RegistryList) DeepCopy() *RegistryList { + if in == nil { + return nil + } + out := new(RegistryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegistryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryObservation) DeepCopyInto(out *RegistryObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryObservation. +func (in *RegistryObservation) DeepCopy() *RegistryObservation { + if in == nil { + return nil + } + out := new(RegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RegistryParameters) DeepCopyInto(out *RegistryParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryParameters. +func (in *RegistryParameters) DeepCopy() *RegistryParameters { + if in == nil { + return nil + } + out := new(RegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrySpec) DeepCopyInto(out *RegistrySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySpec. +func (in *RegistrySpec) DeepCopy() *RegistrySpec { + if in == nil { + return nil + } + out := new(RegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryStatus) DeepCopyInto(out *RegistryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryStatus. +func (in *RegistryStatus) DeepCopy() *RegistryStatus { + if in == nil { + return nil + } + out := new(RegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Repository) DeepCopyInto(out *Repository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Repository. +func (in *Repository) DeepCopy() *Repository { + if in == nil { + return nil + } + out := new(Repository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Repository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryIAMBinding) DeepCopyInto(out *RepositoryIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBinding. +func (in *RepositoryIAMBinding) DeepCopy() *RepositoryIAMBinding { + if in == nil { + return nil + } + out := new(RepositoryIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryIAMBindingInitParameters) DeepCopyInto(out *RepositoryIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryID != nil { + in, out := &in.RepositoryID, &out.RepositoryID + *out = new(string) + **out = **in + } + if in.RepositoryIDRef != nil { + in, out := &in.RepositoryIDRef, &out.RepositoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RepositoryIDSelector != nil { + in, out := &in.RepositoryIDSelector, &out.RepositoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBindingInitParameters. +func (in *RepositoryIAMBindingInitParameters) DeepCopy() *RepositoryIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(RepositoryIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryIAMBindingList) DeepCopyInto(out *RepositoryIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RepositoryIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBindingList. +func (in *RepositoryIAMBindingList) DeepCopy() *RepositoryIAMBindingList { + if in == nil { + return nil + } + out := new(RepositoryIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RepositoryIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryIAMBindingObservation) DeepCopyInto(out *RepositoryIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryID != nil { + in, out := &in.RepositoryID, &out.RepositoryID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBindingObservation. +func (in *RepositoryIAMBindingObservation) DeepCopy() *RepositoryIAMBindingObservation { + if in == nil { + return nil + } + out := new(RepositoryIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryIAMBindingParameters) DeepCopyInto(out *RepositoryIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryID != nil { + in, out := &in.RepositoryID, &out.RepositoryID + *out = new(string) + **out = **in + } + if in.RepositoryIDRef != nil { + in, out := &in.RepositoryIDRef, &out.RepositoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RepositoryIDSelector != nil { + in, out := &in.RepositoryIDSelector, &out.RepositoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBindingParameters. +func (in *RepositoryIAMBindingParameters) DeepCopy() *RepositoryIAMBindingParameters { + if in == nil { + return nil + } + out := new(RepositoryIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryIAMBindingSpec) DeepCopyInto(out *RepositoryIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBindingSpec. +func (in *RepositoryIAMBindingSpec) DeepCopy() *RepositoryIAMBindingSpec { + if in == nil { + return nil + } + out := new(RepositoryIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryIAMBindingStatus) DeepCopyInto(out *RepositoryIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryIAMBindingStatus. +func (in *RepositoryIAMBindingStatus) DeepCopy() *RepositoryIAMBindingStatus { + if in == nil { + return nil + } + out := new(RepositoryIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryInitParameters) DeepCopyInto(out *RepositoryInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryInitParameters. +func (in *RepositoryInitParameters) DeepCopy() *RepositoryInitParameters { + if in == nil { + return nil + } + out := new(RepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryLifecyclePolicy) DeepCopyInto(out *RepositoryLifecyclePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicy. +func (in *RepositoryLifecyclePolicy) DeepCopy() *RepositoryLifecyclePolicy { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryLifecyclePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryLifecyclePolicyInitParameters) DeepCopyInto(out *RepositoryLifecyclePolicyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RepositoryID != nil { + in, out := &in.RepositoryID, &out.RepositoryID + *out = new(string) + **out = **in + } + if in.RepositoryIDRef != nil { + in, out := &in.RepositoryIDRef, &out.RepositoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RepositoryIDSelector != nil { + in, out := &in.RepositoryIDSelector, &out.RepositoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicyInitParameters. +func (in *RepositoryLifecyclePolicyInitParameters) DeepCopy() *RepositoryLifecyclePolicyInitParameters { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryLifecyclePolicyList) DeepCopyInto(out *RepositoryLifecyclePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RepositoryLifecyclePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicyList. +func (in *RepositoryLifecyclePolicyList) DeepCopy() *RepositoryLifecyclePolicyList { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryLifecyclePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryLifecyclePolicyObservation) DeepCopyInto(out *RepositoryLifecyclePolicyObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RepositoryID != nil { + in, out := &in.RepositoryID, &out.RepositoryID + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicyObservation. +func (in *RepositoryLifecyclePolicyObservation) DeepCopy() *RepositoryLifecyclePolicyObservation { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryLifecyclePolicyParameters) DeepCopyInto(out *RepositoryLifecyclePolicyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RepositoryID != nil { + in, out := &in.RepositoryID, &out.RepositoryID + *out = new(string) + **out = **in + } + if in.RepositoryIDRef != nil { + in, out := &in.RepositoryIDRef, &out.RepositoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RepositoryIDSelector != nil { + in, out := &in.RepositoryIDSelector, &out.RepositoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicyParameters. +func (in *RepositoryLifecyclePolicyParameters) DeepCopy() *RepositoryLifecyclePolicyParameters { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryLifecyclePolicySpec) DeepCopyInto(out *RepositoryLifecyclePolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicySpec. 
+func (in *RepositoryLifecyclePolicySpec) DeepCopy() *RepositoryLifecyclePolicySpec { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryLifecyclePolicyStatus) DeepCopyInto(out *RepositoryLifecyclePolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryLifecyclePolicyStatus. +func (in *RepositoryLifecyclePolicyStatus) DeepCopy() *RepositoryLifecyclePolicyStatus { + if in == nil { + return nil + } + out := new(RepositoryLifecyclePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryList) DeepCopyInto(out *RepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Repository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryList. +func (in *RepositoryList) DeepCopy() *RepositoryList { + if in == nil { + return nil + } + out := new(RepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryObservation) DeepCopyInto(out *RepositoryObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryObservation. +func (in *RepositoryObservation) DeepCopy() *RepositoryObservation { + if in == nil { + return nil + } + out := new(RepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryParameters) DeepCopyInto(out *RepositoryParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryParameters. +func (in *RepositoryParameters) DeepCopy() *RepositoryParameters { + if in == nil { + return nil + } + out := new(RepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositorySpec) DeepCopyInto(out *RepositorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySpec. 
+func (in *RepositorySpec) DeepCopy() *RepositorySpec { + if in == nil { + return nil + } + out := new(RepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryStatus) DeepCopyInto(out *RepositoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryStatus. +func (in *RepositoryStatus) DeepCopy() *RepositoryStatus { + if in == nil { + return nil + } + out := new(RepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExpirePeriod != nil { + in, out := &in.ExpirePeriod, &out.ExpirePeriod + *out = new(string) + **out = **in + } + if in.RetainedTop != nil { + in, out := &in.RetainedTop, &out.RetainedTop + *out = new(float64) + **out = **in + } + if in.TagRegexp != nil { + in, out := &in.TagRegexp, &out.TagRegexp + *out = new(string) + **out = **in + } + if in.Untagged != nil { + in, out := &in.Untagged, &out.Untagged + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. +func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExpirePeriod != nil { + in, out := &in.ExpirePeriod, &out.ExpirePeriod + *out = new(string) + **out = **in + } + if in.RetainedTop != nil { + in, out := &in.RetainedTop, &out.RetainedTop + *out = new(float64) + **out = **in + } + if in.TagRegexp != nil { + in, out := &in.TagRegexp, &out.TagRegexp + *out = new(string) + **out = **in + } + if in.Untagged != nil { + in, out := &in.Untagged, &out.Untagged + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
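+//
+// For orientation (hypothetical values; ptrString, ptrFloat and ptrBool are
+// assumed pointer helpers): a RuleParameters value as copied below could
+// expire untagged images after 48 hours while keeping the newest five:
+//
+//	rule := RuleParameters{
+//	    ExpirePeriod: ptrString("48h"),
+//	    RetainedTop:  ptrFloat(5),
+//	    Untagged:     ptrBool(true),
+//	}
+//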
+func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExpirePeriod != nil { + in, out := &in.ExpirePeriod, &out.ExpirePeriod + *out = new(string) + **out = **in + } + if in.RetainedTop != nil { + in, out := &in.RetainedTop, &out.RetainedTop + *out = new(float64) + **out = **in + } + if in.TagRegexp != nil { + in, out := &in.TagRegexp, &out.TagRegexp + *out = new(string) + **out = **in + } + if in.Untagged != nil { + in, out := &in.Untagged, &out.Untagged + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. +func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/container/v1alpha1/zz_generated.resolvers.go b/apis/container/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..c35ce7b --- /dev/null +++ b/apis/container/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,289 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + iam "github.com/tagesjump/provider-upjet-yc/config/iam" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Registry. +func (mg *Registry) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RegistryIAMBinding. 
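+//
+// A hand-written sketch, not generator output (ctx and kube, a
+// controller-runtime client.Reader, are assumed): in practice the
+// crossplane-runtime managed reconciler invokes this before each Terraform
+// operation, but the call shape is simply:
+//
+//	if err := mg.ResolveReferences(ctx, kube); err != nil {
+//	    return errors.Wrap(err, "cannot resolve references")
+//	}
+//
+// On success, spec.forProvider.members and spec.forProvider.registryId are
+// populated from the referenced ServiceAccount and Registry objects.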
+func (mg *RegistryIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RegistryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RegistryIDRef, + Selector: mg.Spec.ForProvider.RegistryIDSelector, + To: reference.To{ + List: &RegistryList{}, + Managed: &Registry{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RegistryID") + } + mg.Spec.ForProvider.RegistryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RegistryIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RegistryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RegistryIDRef, + Selector: mg.Spec.InitProvider.RegistryIDSelector, + To: reference.To{ + List: &RegistryList{}, + Managed: &Registry{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RegistryID") + } + mg.Spec.InitProvider.RegistryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RegistryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RegistryIPPermission. 
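+//
+// For illustration only (perm is an assumed *RegistryIPPermission; v1 is
+// the crossplane-runtime common v1 package): a selector matches Registry
+// objects by label instead of naming one, and resolution then writes the
+// resolved external name into RegistryID:
+//
+//	perm.Spec.ForProvider.RegistryIDSelector = &v1.Selector{
+//	    MatchLabels: map[string]string{"team": "platform"},
+//	}
+//	// after ResolveReferences, perm.Spec.ForProvider.RegistryID is set
+//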
+func (mg *RegistryIPPermission) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RegistryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RegistryIDRef, + Selector: mg.Spec.ForProvider.RegistryIDSelector, + To: reference.To{ + List: &RegistryList{}, + Managed: &Registry{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RegistryID") + } + mg.Spec.ForProvider.RegistryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RegistryIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RegistryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RegistryIDRef, + Selector: mg.Spec.InitProvider.RegistryIDSelector, + To: reference.To{ + List: &RegistryList{}, + Managed: &Registry{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RegistryID") + } + mg.Spec.InitProvider.RegistryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RegistryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RepositoryIAMBinding. +func (mg *RepositoryIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RepositoryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RepositoryIDRef, + Selector: mg.Spec.ForProvider.RepositoryIDSelector, + To: reference.To{ + List: &RepositoryList{}, + Managed: &Repository{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RepositoryID") + } + mg.Spec.ForProvider.RepositoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RepositoryIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.RepositoryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RepositoryIDRef, + Selector: mg.Spec.InitProvider.RepositoryIDSelector, + To: reference.To{ + List: &RepositoryList{}, + Managed: &Repository{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RepositoryID") + } + mg.Spec.InitProvider.RepositoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RepositoryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RepositoryLifecyclePolicy. +func (mg *RepositoryLifecyclePolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RepositoryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RepositoryIDRef, + Selector: mg.Spec.ForProvider.RepositoryIDSelector, + To: reference.To{ + List: &RepositoryList{}, + Managed: &Repository{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RepositoryID") + } + mg.Spec.ForProvider.RepositoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RepositoryIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RepositoryID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RepositoryIDRef, + Selector: mg.Spec.InitProvider.RepositoryIDSelector, + To: reference.To{ + List: &RepositoryList{}, + Managed: &Repository{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RepositoryID") + } + mg.Spec.InitProvider.RepositoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RepositoryIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/container/v1alpha1/zz_groupversion_info.go b/apis/container/v1alpha1/zz_groupversion_info.go index 5089b4e..0719cef 100755 --- a/apis/container/v1alpha1/zz_groupversion_info.go +++ b/apis/container/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/container/v1alpha1/zz_registry_terraformed.go b/apis/container/v1alpha1/zz_registry_terraformed.go index eb830ba..e4b0391 100755 --- a/apis/container/v1alpha1/zz_registry_terraformed.go +++ b/apis/container/v1alpha1/zz_registry_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Registry func (mg *Registry) GetTerraformResourceType() string { - return "yandex_container_registry" + return "yandex_container_registry" } // GetConnectionDetailsMapping for this Registry func (tr *Registry) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Registry func (tr *Registry) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Registry func (tr *Registry) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Registry func (tr *Registry) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Registry func (tr *Registry) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Registry func (tr *Registry) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Registry func (tr *Registry) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Registry func (tr *Registry) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Registry using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Registry) LateInitialize(attrs []byte) (bool, error) { - params := &RegistryParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &RegistryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Registry) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/container/v1alpha1/zz_registry_types.go b/apis/container/v1alpha1/zz_registry_types.go index 7793897..b476c58 100755 --- a/apis/container/v1alpha1/zz_registry_types.go +++ b/apis/container/v1alpha1/zz_registry_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,89 +7,80 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type RegistryInitParameters struct { + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder that the resource belongs to. If value is omitted, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // A set of key/value label pairs to assign to the registry. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the registry. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// A name of the registry. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // A name of the registry. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type RegistryObservation struct { + // Creation timestamp of the registry. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Creation timestamp of the registry. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` - -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the registry. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the registry. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A name of the registry. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // A name of the registry. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Status of the registry. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // Status of the registry. + Status *string `json:"status,omitempty" tf:"status,omitempty"` } - type RegistryParameters struct { + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder in resourcemanager to populate folderId. 
-// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the registry. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the registry. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A name of the registry. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // A name of the registry. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // RegistrySpec defines the desired state of Registry type RegistrySpec struct { v1.ResourceSpec `json:",inline"` - ForProvider RegistryParameters `json:"forProvider"` + ForProvider RegistryParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -102,20 +91,19 @@ type RegistrySpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider RegistryInitParameters `json:"initProvider,omitempty"` + InitProvider RegistryInitParameters `json:"initProvider,omitempty"` } // RegistryStatus defines the observed state of Registry. type RegistryStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider RegistryObservation `json:"atProvider,omitempty"` + AtProvider RegistryObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Registry is the Schema for the Registrys API. Creates a new container registry. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/container/v1alpha1/zz_registryiambinding_terraformed.go b/apis/container/v1alpha1/zz_registryiambinding_terraformed.go index 26a27c1..a676835 100755 --- a/apis/container/v1alpha1/zz_registryiambinding_terraformed.go +++ b/apis/container/v1alpha1/zz_registryiambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
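Throughout these *_terraformed.go files, GetParameters, GetInitParameters, and GetObservation share one trick: marshal the typed spec or status struct with json.TFParser and unmarshal the bytes into a map[string]any. Because every generated field carries a second `tf:"..."` tag, the resulting map is keyed by Terraform attribute names (folder_id) rather than Kubernetes JSON names (folderId). A minimal sketch of that round-trip, assuming TFParser is essentially a jsoniter config whose TagKey is "tf":

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// A trimmed copy of the generated RegistryParameters, keeping only the
// dual json/tf tags that matter here (illustrative, not the real type).
type registryParams struct {
	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
	Name     *string `json:"name,omitempty" tf:"name,omitempty"`
}

func main() {
	// Assumption: json.TFParser behaves like jsoniter configured to read
	// the `tf` struct tag instead of the default `json` tag.
	tfParser := jsoniter.Config{TagKey: "tf"}.Froze()

	name := "prod-registry"
	p, err := tfParser.Marshal(registryParams{Name: &name})
	if err != nil {
		panic(err)
	}

	// The same struct -> bytes -> map round-trip as GetParameters.
	base := map[string]any{}
	if err := tfParser.Unmarshal(p, &base); err != nil {
		panic(err)
	}
	fmt.Println(base) // map[name:prod-registry], keyed by Terraform names
}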
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this RegistryIAMBinding func (mg *RegistryIAMBinding) GetTerraformResourceType() string { - return "yandex_container_registry_iam_binding" + return "yandex_container_registry_iam_binding" } // GetConnectionDetailsMapping for this RegistryIAMBinding func (tr *RegistryIAMBinding) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this RegistryIAMBinding func (tr *RegistryIAMBinding) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this RegistryIAMBinding func (tr *RegistryIAMBinding) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this RegistryIAMBinding func (tr *RegistryIAMBinding) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this RegistryIAMBinding func (tr *RegistryIAMBinding) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this RegistryIAMBinding func (tr *RegistryIAMBinding) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this RegistryIAMBinding func (tr *RegistryIAMBinding) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this RegistryIAMBinding func (tr *RegistryIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", 
tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this RegistryIAMBinding using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *RegistryIAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &RegistryIAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &RegistryIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *RegistryIAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/container/v1alpha1/zz_registryiambinding_types.go b/apis/container/v1alpha1/zz_registryiambinding_types.go index 540a533..e7ec391 100755 --- a/apis/container/v1alpha1/zz_registryiambinding_types.go +++ b/apis/container/v1alpha1/zz_registryiambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,114 +7,104 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type RegistryIAMBindingInitParameters struct { + // Identities that will be granted the privilege in role.
Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() -// +crossplane:generate:reference:refFieldName=ServiceAccountRef -// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` - -// The Yandex Container Registry ID to apply a binding to. -// +crossplane:generate:reference:type=Registry -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + // The Yandex Container Registry ID to apply a binding to. + // +crossplane:generate:reference:type=Registry + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// Reference to a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` + // Reference to a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` -// Selector for a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` + // Selector for a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` -// The role that should be applied. See roles. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// References to ServiceAccount in iam to populate members. -// +kubebuilder:validation:Optional -ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` -// Selector for a list of ServiceAccount in iam to populate members. -// +kubebuilder:validation:Optional -ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + // Selector for a list of ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type RegistryIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Identities that will be granted the privilege in role. 
Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` - -// The Yandex Container Registry ID to apply a binding to. -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + // The Yandex Container Registry ID to apply a binding to. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// The role that should be applied. See roles. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type RegistryIAMBindingParameters struct { - -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() -// +crossplane:generate:reference:refFieldName=ServiceAccountRef -// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector -// +kubebuilder:validation:Optional -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` - -// The Yandex Container Registry ID to apply a binding to. -// +crossplane:generate:reference:type=Registry -// +kubebuilder:validation:Optional -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` - -// Reference to a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` - -// Selector for a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` - -// The role that should be applied. See roles. -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -// References to ServiceAccount in iam to populate members. -// +kubebuilder:validation:Optional -ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` - -// Selector for a list of ServiceAccount in iam to populate members. -// +kubebuilder:validation:Optional -ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` - -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // Identities that will be granted the privilege in role. 
Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The Yandex Container Registry ID to apply a binding to. + // +crossplane:generate:reference:type=Registry + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // Reference to a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` + + // Selector for a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` + + // The role that should be applied. See roles. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // RegistryIAMBindingSpec defines the desired state of RegistryIAMBinding type RegistryIAMBindingSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider RegistryIAMBindingParameters `json:"forProvider"` + ForProvider RegistryIAMBindingParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -127,20 +115,19 @@ type RegistryIAMBindingSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider RegistryIAMBindingInitParameters `json:"initProvider,omitempty"` + InitProvider RegistryIAMBindingInitParameters `json:"initProvider,omitempty"` } // RegistryIAMBindingStatus defines the observed state of RegistryIAMBinding. type RegistryIAMBindingStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider RegistryIAMBindingObservation `json:"atProvider,omitempty"` + AtProvider RegistryIAMBindingObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // RegistryIAMBinding is the Schema for the RegistryIAMBindings API. 
Allows management of a single IAM binding for a // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -150,9 +137,9 @@ type RegistryIAMBindingStatus struct { type RegistryIAMBinding struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec RegistryIAMBindingSpec `json:"spec"` - Status RegistryIAMBindingStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec RegistryIAMBindingSpec `json:"spec"` + Status RegistryIAMBindingStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/container/v1alpha1/zz_registryippermission_terraformed.go b/apis/container/v1alpha1/zz_registryippermission_terraformed.go index fb8c75f..a76edf6 100755 --- a/apis/container/v1alpha1/zz_registryippermission_terraformed.go +++ b/apis/container/v1alpha1/zz_registryippermission_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this RegistryIPPermission func (mg *RegistryIPPermission) GetTerraformResourceType() string { - return "yandex_container_registry_ip_permission" + return "yandex_container_registry_ip_permission" } // GetConnectionDetailsMapping for this RegistryIPPermission func (tr *RegistryIPPermission) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this RegistryIPPermission func (tr *RegistryIPPermission) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this RegistryIPPermission func (tr *RegistryIPPermission) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this RegistryIPPermission func (tr *RegistryIPPermission) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this RegistryIPPermission func (tr *RegistryIPPermission) GetParameters() (map[string]any, error) { - p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this RegistryIPPermission func (tr *RegistryIPPermission) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this RegistryIPPermission func (tr *RegistryIPPermission) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this RegistryIPPermission func (tr *RegistryIPPermission) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this RegistryIPPermission using its observed tfState. // returns True if there are any spec changes for the resource.
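The Note(lsviben) comment in each GetMergedParameters above is worth unpacking: mergo.WithSliceDeepCopy flips the config's Overwrite flag to true as a side effect, so the extra option function immediately resets it, letting initProvider fill only the gaps that forProvider leaves empty. A self-contained sketch of that behavior (assuming the github.com/imdario/mergo module these generated files typically import):

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// forProvider values must stay authoritative...
	forProvider := map[string]any{"name": "prod-registry"}
	// ...while initProvider only contributes keys that are missing.
	initProvider := map[string]any{"name": "ignored", "folder_id": "b1gexample"}

	// Same call pattern as GetMergedParameters: WithSliceDeepCopy enables
	// deep copying of slices from src, but it also sets Overwrite = true,
	// so the trailing option resets it to false.
	if err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	fmt.Println(forProvider["name"])      // prod-registry (kept)
	fmt.Println(forProvider["folder_id"]) // b1gexample (filled in)
}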
func (tr *RegistryIPPermission) LateInitialize(attrs []byte) (bool, error) { - params := &RegistryIPPermissionParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &RegistryIPPermissionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *RegistryIPPermission) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/container/v1alpha1/zz_registryippermission_types.go b/apis/container/v1alpha1/zz_registryippermission_types.go index a1a8914..5789875 100755 --- a/apis/container/v1alpha1/zz_registryippermission_types.go +++ b/apis/container/v1alpha1/zz_registryippermission_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,86 +7,76 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type RegistryIPPermissionInitParameters struct { + // List of configured CIDRs, from which pull is allowed. + // +listType=set + Pull []*string `json:"pull,omitempty" tf:"pull,omitempty"` -// List of configured CIDRs, from which pull is allowed. -// +listType=set -Pull []*string `json:"pull,omitempty" tf:"pull,omitempty"` - -// List of configured CIDRs, from which push is allowed. -// +listType=set -Push []*string `json:"push,omitempty" tf:"push,omitempty"` + // List of configured CIDRs, from which push is allowed. + // +listType=set + Push []*string `json:"push,omitempty" tf:"push,omitempty"` -// The ID of the registry that ip restrictions applied to. -// +crossplane:generate:reference:type=Registry -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + // The ID of the registry that ip restrictions applied to. + // +crossplane:generate:reference:type=Registry + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// Reference to a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` + // Reference to a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` -// Selector for a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` + // Selector for a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` } - type RegistryIPPermissionObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + // List of configured CIDRs, from which pull is allowed. 
+ // +listType=set + Pull []*string `json:"pull,omitempty" tf:"pull,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// List of configured CIDRs, from which pull is allowed. -// +listType=set -Pull []*string `json:"pull,omitempty" tf:"pull,omitempty"` - -// List of configured CIDRs, from which push is allowed. -// +listType=set -Push []*string `json:"push,omitempty" tf:"push,omitempty"` + // List of configured CIDRs, from which push is allowed. + // +listType=set + Push []*string `json:"push,omitempty" tf:"push,omitempty"` -// The ID of the registry that ip restrictions applied to. -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + // The ID of the registry that ip restrictions applied to. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` } - type RegistryIPPermissionParameters struct { + // List of configured CIDRs, from which pull is allowed. + // +kubebuilder:validation:Optional + // +listType=set + Pull []*string `json:"pull,omitempty" tf:"pull,omitempty"` -// List of configured CIDRs, from which pull is allowed. -// +kubebuilder:validation:Optional -// +listType=set -Pull []*string `json:"pull,omitempty" tf:"pull,omitempty"` - -// List of configured CIDRs, from which push is allowed. -// +kubebuilder:validation:Optional -// +listType=set -Push []*string `json:"push,omitempty" tf:"push,omitempty"` + // List of configured CIDRs, from which push is allowed. + // +kubebuilder:validation:Optional + // +listType=set + Push []*string `json:"push,omitempty" tf:"push,omitempty"` -// The ID of the registry that ip restrictions applied to. -// +crossplane:generate:reference:type=Registry -// +kubebuilder:validation:Optional -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + // The ID of the registry that ip restrictions applied to. + // +crossplane:generate:reference:type=Registry + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// Reference to a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` + // Reference to a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDRef *v1.Reference `json:"registryIdRef,omitempty" tf:"-"` -// Selector for a Registry to populate registryId. -// +kubebuilder:validation:Optional -RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` + // Selector for a Registry to populate registryId. + // +kubebuilder:validation:Optional + RegistryIDSelector *v1.Selector `json:"registryIdSelector,omitempty" tf:"-"` } // RegistryIPPermissionSpec defines the desired state of RegistryIPPermission type RegistryIPPermissionSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider RegistryIPPermissionParameters `json:"forProvider"` + ForProvider RegistryIPPermissionParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -99,20 +87,19 @@ type RegistryIPPermissionSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. 
- InitProvider RegistryIPPermissionInitParameters `json:"initProvider,omitempty"` + InitProvider RegistryIPPermissionInitParameters `json:"initProvider,omitempty"` } // RegistryIPPermissionStatus defines the observed state of RegistryIPPermission. type RegistryIPPermissionStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider RegistryIPPermissionObservation `json:"atProvider,omitempty"` + AtProvider RegistryIPPermissionObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // RegistryIPPermission is the Schema for the RegistryIPPermissions API. Creates a new Container Registry IP Permission. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/container/v1alpha1/zz_repository_terraformed.go b/apis/container/v1alpha1/zz_repository_terraformed.go index 2101852..95647c4 100755 --- a/apis/container/v1alpha1/zz_repository_terraformed.go +++ b/apis/container/v1alpha1/zz_repository_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Repository func (mg *Repository) GetTerraformResourceType() string { - return "yandex_container_repository" + return "yandex_container_repository" } // GetConnectionDetailsMapping for this Repository func (tr *Repository) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Repository func (tr *Repository) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Repository func (tr *Repository) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Repository func (tr *Repository) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Repository func (tr *Repository) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Repository func (tr *Repository) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Repository func (tr *Repository) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Repository func (tr *Repository) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Repository using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Repository) LateInitialize(attrs []byte) (bool, error) { - params := &RepositoryParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &RepositoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Repository) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/container/v1alpha1/zz_repository_types.go b/apis/container/v1alpha1/zz_repository_types.go index 5a3187e..b34ec15 100755 --- a/apis/container/v1alpha1/zz_repository_types.go +++ b/apis/container/v1alpha1/zz_repository_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,43 +7,34 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type RepositoryInitParameters struct { - -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type RepositoryObservation struct { + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// (String) The ID of this resource. -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type RepositoryParameters struct { - -// (String) -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // RepositorySpec defines the desired state of Repository type RepositorySpec struct { v1.ResourceSpec `json:",inline"` - ForProvider RepositoryParameters `json:"forProvider"` + ForProvider RepositoryParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -56,21 +45,20 @@ type RepositorySpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider RepositoryInitParameters `json:"initProvider,omitempty"` + InitProvider RepositoryInitParameters `json:"initProvider,omitempty"` } // RepositoryStatus defines the observed state of Repository. type RepositoryStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider RepositoryObservation `json:"atProvider,omitempty"` + AtProvider RepositoryObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - -// Repository is the Schema for the Repositorys API. +// Repository is the Schema for the Repositorys API. 
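Both RegistryIAMBinding above and Repository just below guard their required arguments with the same CEL XValidation pattern: the rule only demands spec.forProvider.name (or role, for the bindings) when the management policies actually let Crossplane create or update the external resource. Restated as plain Go, a sketch of the predicate the API server evaluates (the function name is illustrative):

package main

import "fmt"

// requiredParamSatisfied mirrors the shape of the generated CEL rule:
// !('*' in policies || 'Create' in policies || 'Update' in policies)
//     || has(forProvider.name) || has(initProvider.name)
func requiredParamSatisfied(policies []string, forValue, initValue *string) bool {
	managesCreateOrUpdate := false
	for _, p := range policies {
		if p == "*" || p == "Create" || p == "Update" {
			managesCreateOrUpdate = true
			break
		}
	}
	if !managesCreateOrUpdate {
		return true // observe-only objects may omit the parameter
	}
	return forValue != nil || initValue != nil
}

func main() {
	name := "app-repo"
	fmt.Println(requiredParamSatisfied([]string{"*"}, nil, nil))       // false: admission rejects
	fmt.Println(requiredParamSatisfied([]string{"*"}, &name, nil))     // true
	fmt.Println(requiredParamSatisfied([]string{"Observe"}, nil, nil)) // true
}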
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" @@ -79,9 +67,9 @@ type RepositoryStatus struct { type Repository struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec RepositorySpec `json:"spec"` - Status RepositoryStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec RepositorySpec `json:"spec"` + Status RepositoryStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/container/v1alpha1/zz_repositoryiambinding_terraformed.go b/apis/container/v1alpha1/zz_repositoryiambinding_terraformed.go index 3615de8..f8850fe 100755 --- a/apis/container/v1alpha1/zz_repositoryiambinding_terraformed.go +++ b/apis/container/v1alpha1/zz_repositoryiambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this RepositoryIAMBinding func (mg *RepositoryIAMBinding) GetTerraformResourceType() string { - return "yandex_container_repository_iam_binding" + return "yandex_container_repository_iam_binding" } // GetConnectionDetailsMapping for this RepositoryIAMBinding func (tr *RepositoryIAMBinding) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this RepositoryIAMBinding func (tr *RepositoryIAMBinding) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this RepositoryIAMBinding func (tr *RepositoryIAMBinding) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this RepositoryIAMBinding func (tr *RepositoryIAMBinding) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this RepositoryIAMBinding func (tr *RepositoryIAMBinding) GetParameters() (map[string]any, 
error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this RepositoryIAMBinding func (tr *RepositoryIAMBinding) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this RepositoryIAMBinding func (tr *RepositoryIAMBinding) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this RepositoryIAMBinding func (tr *RepositoryIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this RepositoryIAMBinding using its observed tfState. // returns True if there are any spec changes for the resource.
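LateInitialize, declared just below for RepositoryIAMBinding, only ever copies observed Terraform state into spec fields the user left unset; the WithZeroValueJSONOmitEmptyFilter option visible in these bodies additionally keeps zero values of omitempty-tagged fields from being adopted as observations. Reduced to a single field, the generic late-initializer does roughly this (lateInitString is a hypothetical helper, not upjet API):

package main

import "fmt"

// lateInitString is a toy, per-field version of what the generic
// late-initializer does: adopt the observed value only when the spec
// field is still nil, and report whether the spec changed.
func lateInitString(spec **string, observed *string) bool {
	if *spec != nil || observed == nil {
		return false // user already set it, or nothing was observed
	}
	v := *observed
	*spec = &v
	return true
}

func main() {
	var role *string
	observed := "container-registry.images.puller"

	changed := lateInitString(&role, &observed)
	fmt.Println(changed, *role) // true container-registry.images.puller

	// A second pass is a no-op: the spec now owns the value.
	fmt.Println(lateInitString(&role, &observed)) // false
}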
func (tr *RepositoryIAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &RepositoryIAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &RepositoryIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *RepositoryIAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/container/v1alpha1/zz_repositoryiambinding_types.go b/apis/container/v1alpha1/zz_repositoryiambinding_types.go index ac5b004..ced5566 100755 --- a/apis/container/v1alpha1/zz_repositoryiambinding_types.go +++ b/apis/container/v1alpha1/zz_repositoryiambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,118 +7,109 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type RepositoryIAMBindingInitParameters struct { + // (Set of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// (Set of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() -// +crossplane:generate:reference:refFieldName=ServiceAccountRef -// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` - -// (String) -// +crossplane:generate:reference:type=Repository -RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"` + // (String) + // +crossplane:generate:reference:type=Repository + RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"` -// Reference to a Repository to populate repositoryId. -// +kubebuilder:validation:Optional -RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"` + // Reference to a Repository to populate repositoryId. + // +kubebuilder:validation:Optional + RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"` -// Selector for a Repository to populate repositoryId. -// +kubebuilder:validation:Optional -RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"` + // Selector for a Repository to populate repositoryId. 
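GetMergedParameters above overlays `spec.initProvider` onto `spec.forProvider` only when asked, and only for keys that forProvider leaves unset; note that its generated doc comment still reads "GetInitParameters", an upstream generator quirk this formatting-only change leaves untouched. A self-contained sketch of the merge semantics (the mergo import path is an assumption, since the generated file's import block sits outside this hunk):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo" // assumed import path for the mergo used above
)

func main() {
	forProvider := map[string]any{"role": "container-registry.images.puller"}
	initProvider := map[string]any{"role": "ignored-on-conflict", "sleep_after": 30.0}

	// Same shape as the generated call: deep-copy slices, then force
	// Overwrite back to false so forProvider wins on conflicting keys.
	if err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}
	fmt.Println(forProvider) // map[role:container-registry.images.puller sleep_after:30]
}
```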
diff --git a/apis/container/v1alpha1/zz_repositoryiambinding_types.go b/apis/container/v1alpha1/zz_repositoryiambinding_types.go
index ac5b004..ced5566 100755
--- a/apis/container/v1alpha1/zz_repositoryiambinding_types.go
+++ b/apis/container/v1alpha1/zz_repositoryiambinding_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,118 +7,109 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
 
-
-
-
 type RepositoryIAMBindingInitParameters struct {
+	// (Set of String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
+	// +crossplane:generate:reference:refFieldName=ServiceAccountRef
+	// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
 
-// (Set of String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
-// +crossplane:generate:reference:refFieldName=ServiceAccountRef
-// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
-
-// (String)
-// +crossplane:generate:reference:type=Repository
-RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=Repository
+	RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
 
-// Reference to a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
+	// Reference to a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
 
-// Selector for a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
+	// Selector for a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
 
-// (String)
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// (String)
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
 
-// References to ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
+	// References to ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
 
-// Selector for a list of ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
+	// Selector for a list of ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
 
-// (Number)
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	// (Number)
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
 }
 
-
 type RepositoryIAMBindingObservation struct {
+	// (String) The ID of this resource.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-// (String) The ID of this resource.
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// (Set of String)
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
 
-// (Set of String)
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+	// (String)
+	RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
 
-// (String)
-RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
+	// (String)
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
 
-// (String)
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
-
-// (Number)
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	// (Number)
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
 }
 
-
 type RepositoryIAMBindingParameters struct {
-
-// (Set of String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
-// +crossplane:generate:reference:refFieldName=ServiceAccountRef
-// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
-// +kubebuilder:validation:Optional
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
-
-// (String)
-// +crossplane:generate:reference:type=Repository
-// +kubebuilder:validation:Optional
-RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
-
-// Reference to a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
-
-// Selector for a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
-
-// (String)
-// +kubebuilder:validation:Optional
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
-
-// References to ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
-
-// Selector for a list of ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
-
-// (Number)
-// +kubebuilder:validation:Optional
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	// (Set of String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
+	// +crossplane:generate:reference:refFieldName=ServiceAccountRef
+	// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// (String)
+	// +crossplane:generate:reference:type=Repository
+	// +kubebuilder:validation:Optional
+	RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
+
+	// Reference to a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
+
+	// Selector for a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
+
+	// (String)
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// References to ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
+
+	// Selector for a list of ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
+
+	// (Number)
+	// +kubebuilder:validation:Optional
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
 }
 
 // RepositoryIAMBindingSpec defines the desired state of RepositoryIAMBinding
 type RepositoryIAMBindingSpec struct {
 	v1.ResourceSpec `json:",inline"`
- ForProvider RepositoryIAMBindingParameters `json:"forProvider"`
+	ForProvider RepositoryIAMBindingParameters `json:"forProvider"`
 // THIS IS A BETA FIELD. It will be honored
 // unless the Management Policies feature flag is disabled.
 // InitProvider holds the same fields as ForProvider, with the exception
@@ -131,21 +120,20 @@ type RepositoryIAMBindingSpec struct {
 // required on creation, but we do not desire to update them after creation,
 // for example because of an external controller is managing them, like an
 // autoscaler.
- InitProvider RepositoryIAMBindingInitParameters `json:"initProvider,omitempty"`
+	InitProvider RepositoryIAMBindingInitParameters `json:"initProvider,omitempty"`
 }
 
 // RepositoryIAMBindingStatus defines the observed state of RepositoryIAMBinding.
 type RepositoryIAMBindingStatus struct {
 	v1.ResourceStatus `json:",inline"`
- AtProvider RepositoryIAMBindingObservation `json:"atProvider,omitempty"`
+	AtProvider RepositoryIAMBindingObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
-// RepositoryIAMBinding is the Schema for the RepositoryIAMBindings API.
+// RepositoryIAMBinding is the Schema for the RepositoryIAMBindings API.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
 // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
@@ -154,9 +142,9 @@ type RepositoryIAMBindingStatus struct {
 type RepositoryIAMBinding struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
- Spec RepositoryIAMBindingSpec `json:"spec"`
- Status RepositoryIAMBindingStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
+	Spec   RepositoryIAMBindingSpec   `json:"spec"`
+	Status RepositoryIAMBindingStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/apis/container/v1alpha1/zz_repositorylifecyclepolicy_terraformed.go b/apis/container/v1alpha1/zz_repositorylifecyclepolicy_terraformed.go
index c96a175..4bf2a74 100755
--- a/apis/container/v1alpha1/zz_repositorylifecyclepolicy_terraformed.go
+++ b/apis/container/v1alpha1/zz_repositorylifecyclepolicy_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -10,120 +8,118 @@ import (
 
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this RepositoryLifecyclePolicy
 func (mg *RepositoryLifecyclePolicy) GetTerraformResourceType() string {
- return "yandex_container_repository_lifecycle_policy"
+	return "yandex_container_repository_lifecycle_policy"
 }
 
 // GetConnectionDetailsMapping for this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) GetConnectionDetailsMapping() map[string]string {
- return nil
+	return nil
 }
 
 // GetObservation of this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) GetObservation() (map[string]any, error) {
- o, err := json.TFParser.Marshal(tr.Status.AtProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) SetObservation(obs map[string]any) error {
- p, err := json.TFParser.Marshal(obs)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) GetID() string {
- if tr.Status.AtProvider.ID == nil {
- return ""
- }
- return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) GetParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) SetParameters(params map[string]any) error {
- p, err := json.TFParser.Marshal(params)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) GetInitParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetInitParameters of this RepositoryLifecyclePolicy
 func (tr *RepositoryLifecyclePolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
- params, err := tr.GetParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
- }
- if !shouldMergeInitProvider {
- return params, nil
- }
-
- initParams, err := tr.GetInitParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
- }
-
- // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
- // slices from the initProvider to forProvider. As it also sets
- // overwrite to true, we need to set it back to false, we don't
- // want to overwrite the forProvider fields with the initProvider
- // fields.
- err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
- c.Overwrite = false
- })
- if err != nil {
- return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
- }
-
- return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this RepositoryLifecyclePolicy using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *RepositoryLifecyclePolicy) LateInitialize(attrs []byte) (bool, error) {
- params := &RepositoryLifecyclePolicyParameters{}
- if err := json.TFParser.Unmarshal(attrs, params); err != nil {
- return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
- }
- opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
- li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &RepositoryLifecyclePolicyParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *RepositoryLifecyclePolicy) GetTerraformSchemaVersion() int {
- return 0
+	return 0
 }
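LateInitialize above fills unset `spec.forProvider` fields from observed Terraform state, with the zero-value filter keeping empty observed values from clobbering anything, and reports whether the spec changed. A minimal caller sketch, assuming it lives in this package:

```go
package v1alpha1

// Hypothetical caller: attrs is the raw Terraform state JSON for this
// resource, as handed to the generated LateInitialize above.
func lateInitSketch(tr *RepositoryLifecyclePolicy, attrs []byte) (bool, error) {
	changed, err := tr.LateInitialize(attrs)
	if err != nil {
		return false, err
	}
	// changed == true means unset spec.forProvider fields were filled in
	// from the observed state; a reconciler would now persist the spec.
	return changed, nil
}
```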
diff --git a/apis/container/v1alpha1/zz_repositorylifecyclepolicy_types.go b/apis/container/v1alpha1/zz_repositorylifecyclepolicy_types.go
index 9573f01..3562db9 100755
--- a/apis/container/v1alpha1/zz_repositorylifecyclepolicy_types.go
+++ b/apis/container/v1alpha1/zz_repositorylifecyclepolicy_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,169 +7,154 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
 
-
-
-
 type RepositoryLifecyclePolicyInitParameters struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// (String)
-// +crossplane:generate:reference:type=Repository
-RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=Repository
+	RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
 
-// Reference to a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
+	// Reference to a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
 
-// Selector for a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
+	// Selector for a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
 
-// (Block List) (see below for nested schema)
-Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"`
+	// (Block List) (see below for nested schema)
+	Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"`
 
-// (String)
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
+	// (String)
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
 }
 
-
 type RepositoryLifecyclePolicyObservation struct {
+	// (String)
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
 
-// (String)
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-
-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// (String) The ID of this resource.
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// (String) The ID of this resource.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// (String)
-RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
+	// (String)
+	RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
 
-// (Block List) (see below for nested schema)
-Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"`
+	// (Block List) (see below for nested schema)
+	Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"`
 
-// (String)
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
+	// (String)
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
 }
 
-
 type RepositoryLifecyclePolicyParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// (String)
-// +crossplane:generate:reference:type=Repository
-// +kubebuilder:validation:Optional
-RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=Repository
+	// +kubebuilder:validation:Optional
+	RepositoryID *string `json:"repositoryId,omitempty" tf:"repository_id,omitempty"`
 
-// Reference to a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
+	// Reference to a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDRef *v1.Reference `json:"repositoryIdRef,omitempty" tf:"-"`
 
-// Selector for a Repository to populate repositoryId.
-// +kubebuilder:validation:Optional
-RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
+	// Selector for a Repository to populate repositoryId.
+	// +kubebuilder:validation:Optional
+	RepositoryIDSelector *v1.Selector `json:"repositoryIdSelector,omitempty" tf:"-"`
 
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
 }
 
-
 type RuleInitParameters struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// (String)
+	ExpirePeriod *string `json:"expirePeriod,omitempty" tf:"expire_period,omitempty"`
 
-// (String)
-ExpirePeriod *string `json:"expirePeriod,omitempty" tf:"expire_period,omitempty"`
+	// (Number)
+	RetainedTop *float64 `json:"retainedTop,omitempty" tf:"retained_top,omitempty"`
 
-// (Number)
-RetainedTop *float64 `json:"retainedTop,omitempty" tf:"retained_top,omitempty"`
+	// (String)
+	TagRegexp *string `json:"tagRegexp,omitempty" tf:"tag_regexp,omitempty"`
 
-// (String)
-TagRegexp *string `json:"tagRegexp,omitempty" tf:"tag_regexp,omitempty"`
-
-// (Boolean)
-Untagged *bool `json:"untagged,omitempty" tf:"untagged,omitempty"`
+	// (Boolean)
+	Untagged *bool `json:"untagged,omitempty" tf:"untagged,omitempty"`
 }
 
-
 type RuleObservation struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// (String)
+	ExpirePeriod *string `json:"expirePeriod,omitempty" tf:"expire_period,omitempty"`
 
-// (String)
-ExpirePeriod *string `json:"expirePeriod,omitempty" tf:"expire_period,omitempty"`
+	// (Number)
+	RetainedTop *float64 `json:"retainedTop,omitempty" tf:"retained_top,omitempty"`
 
-// (Number)
-RetainedTop *float64 `json:"retainedTop,omitempty" tf:"retained_top,omitempty"`
+	// (String)
+	TagRegexp *string `json:"tagRegexp,omitempty" tf:"tag_regexp,omitempty"`
 
-// (String)
-TagRegexp *string `json:"tagRegexp,omitempty" tf:"tag_regexp,omitempty"`
-
-// (Boolean)
-Untagged *bool `json:"untagged,omitempty" tf:"untagged,omitempty"`
+	// (Boolean)
+	Untagged *bool `json:"untagged,omitempty" tf:"untagged,omitempty"`
 }
 
-
 type RuleParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ExpirePeriod *string `json:"expirePeriod,omitempty" tf:"expire_period,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-ExpirePeriod *string `json:"expirePeriod,omitempty" tf:"expire_period,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	RetainedTop *float64 `json:"retainedTop,omitempty" tf:"retained_top,omitempty"`
 
-// (Number)
-// +kubebuilder:validation:Optional
-RetainedTop *float64 `json:"retainedTop,omitempty" tf:"retained_top,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	TagRegexp *string `json:"tagRegexp,omitempty" tf:"tag_regexp,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-TagRegexp *string `json:"tagRegexp,omitempty" tf:"tag_regexp,omitempty"`
-
-// (Boolean)
-// +kubebuilder:validation:Optional
-Untagged *bool `json:"untagged,omitempty" tf:"untagged,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Untagged *bool `json:"untagged,omitempty" tf:"untagged,omitempty"`
 }
 
 // RepositoryLifecyclePolicySpec defines the desired state of RepositoryLifecyclePolicy
 type RepositoryLifecyclePolicySpec struct {
 	v1.ResourceSpec `json:",inline"`
- ForProvider RepositoryLifecyclePolicyParameters `json:"forProvider"`
+	ForProvider RepositoryLifecyclePolicyParameters `json:"forProvider"`
 // THIS IS A BETA FIELD. It will be honored
 // unless the Management Policies feature flag is disabled.
 // InitProvider holds the same fields as ForProvider, with the exception
@@ -182,21 +165,20 @@ type RepositoryLifecyclePolicySpec struct {
 // required on creation, but we do not desire to update them after creation,
 // for example because of an external controller is managing them, like an
 // autoscaler.
- InitProvider RepositoryLifecyclePolicyInitParameters `json:"initProvider,omitempty"`
+	InitProvider RepositoryLifecyclePolicyInitParameters `json:"initProvider,omitempty"`
 }
 
 // RepositoryLifecyclePolicyStatus defines the observed state of RepositoryLifecyclePolicy.
 type RepositoryLifecyclePolicyStatus struct {
 	v1.ResourceStatus `json:",inline"`
- AtProvider RepositoryLifecyclePolicyObservation `json:"atProvider,omitempty"`
+	AtProvider RepositoryLifecyclePolicyObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
-// RepositoryLifecyclePolicy is the Schema for the RepositoryLifecyclePolicys API.
+// RepositoryLifecyclePolicy is the Schema for the RepositoryLifecyclePolicys API.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
 // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
@@ -205,9 +187,9 @@ type RepositoryLifecyclePolicyStatus struct {
 type RepositoryLifecyclePolicy struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.status) || (has(self.initProvider) && has(self.initProvider.status))",message="spec.forProvider.status is a required parameter"
- Spec RepositoryLifecyclePolicySpec `json:"spec"`
- Status RepositoryLifecyclePolicyStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.status) || (has(self.initProvider) && has(self.initProvider.status))",message="spec.forProvider.status is a required parameter"
+	Spec   RepositoryLifecyclePolicySpec   `json:"spec"`
+	Status RepositoryLifecyclePolicyStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/apis/dataproc/v1alpha1/zz_cluster_terraformed.go b/apis/dataproc/v1alpha1/zz_cluster_terraformed.go
index 20b0642..3cbc045 100755
--- a/apis/dataproc/v1alpha1/zz_cluster_terraformed.go
+++ b/apis/dataproc/v1alpha1/zz_cluster_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -10,120 +8,118 @@ import (
 
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this Cluster
 func (mg *Cluster) GetTerraformResourceType() string {
- return "yandex_dataproc_cluster"
+	return "yandex_dataproc_cluster"
 }
 
 // GetConnectionDetailsMapping for this Cluster
 func (tr *Cluster) GetConnectionDetailsMapping() map[string]string {
- return nil
+	return nil
 }
 
 // GetObservation of this Cluster
 func (tr *Cluster) GetObservation() (map[string]any, error) {
- o, err := json.TFParser.Marshal(tr.Status.AtProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this Cluster
 func (tr *Cluster) SetObservation(obs map[string]any) error {
- p, err := json.TFParser.Marshal(obs)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this Cluster
 func (tr *Cluster) GetID() string {
- if tr.Status.AtProvider.ID == nil {
- return ""
- }
- return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this Cluster
 func (tr *Cluster) GetParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this Cluster
 func (tr *Cluster) SetParameters(params map[string]any) error {
- p, err := json.TFParser.Marshal(params)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this Cluster
 func (tr *Cluster) GetInitParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetInitParameters of this Cluster
 func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
- params, err := tr.GetParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
- }
- if !shouldMergeInitProvider {
- return params, nil
- }
-
- initParams, err := tr.GetInitParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
- }
-
- // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
- // slices from the initProvider to forProvider. As it also sets
- // overwrite to true, we need to set it back to false, we don't
- // want to overwrite the forProvider fields with the initProvider
- // fields.
- err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
- c.Overwrite = false
- })
- if err != nil {
- return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
- }
-
- return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this Cluster using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) {
- params := &ClusterParameters{}
- if err := json.TFParser.Unmarshal(attrs, params); err != nil {
- return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
- }
- opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
- li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &ClusterParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *Cluster) GetTerraformSchemaVersion() int {
- return 0
+	return 0
}
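One detail worth noting before the types file: the ref/selector fields never reach Terraform because the generated accessors marshal with `json.TFParser`, which keys off the `tf` struct tag, and reference fields are tagged `tf:"-"`. A sketch, assuming TFParser behaves like a jsoniter config with TagKey "tf", which is consistent with its use above (the demo type is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/crossplane/upjet/pkg/resource/json"
)

type demoParams struct {
	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
	Ref    *string `json:"zoneIdRef,omitempty" tf:"-"` // never sent to Terraform
}

func main() {
	zone, ref := "ru-central1-a", "some-ref"
	b, _ := json.TFParser.Marshal(demoParams{ZoneID: &zone, Ref: &ref})
	fmt.Println(string(b)) // expected: {"zone_id":"ru-central1-a"}
}
```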
diff --git a/apis/dataproc/v1alpha1/zz_cluster_types.go b/apis/dataproc/v1alpha1/zz_cluster_types.go
index 1af0667..7ecd1ff 100755
--- a/apis/dataproc/v1alpha1/zz_cluster_types.go
+++ b/apis/dataproc/v1alpha1/zz_cluster_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,615 +7,570 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
 
-
-
-
 type AutoscalingConfigInitParameters struct {
+	// Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used.
+	CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
 
-// Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used.
-CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
+	// Timeout to gracefully decommission nodes during downscaling. In seconds.
+	DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"`
 
-// Timeout to gracefully decommission nodes during downscaling. In seconds.
-DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"`
+	// Maximum number of nodes in autoscaling subclusters.
+	MaxHostsCount *float64 `json:"maxHostsCount,omitempty" tf:"max_hosts_count,omitempty"`
 
-// Maximum number of nodes in autoscaling subclusters.
-MaxHostsCount *float64 `json:"maxHostsCount,omitempty" tf:"max_hosts_count,omitempty"`
+	// Time in seconds allotted for averaging metrics.
+	MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
 
-// Time in seconds allotted for averaging metrics.
-MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+	// Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines.
+	Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
 
-// Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines.
-Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+	// Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should.
+	StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
 
-// Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should.
-StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
-
-// The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected.
-WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+	// The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected.
+	WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
 }
 
-
 type AutoscalingConfigObservation struct {
+	// Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used.
+	CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
 
-// Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used.
-CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
-
-// Timeout to gracefully decommission nodes during downscaling. In seconds.
-DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"`
+	// Timeout to gracefully decommission nodes during downscaling. In seconds.
+	DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"`
 
-// Maximum number of nodes in autoscaling subclusters.
-MaxHostsCount *float64 `json:"maxHostsCount,omitempty" tf:"max_hosts_count,omitempty"`
+	// Maximum number of nodes in autoscaling subclusters.
+	MaxHostsCount *float64 `json:"maxHostsCount,omitempty" tf:"max_hosts_count,omitempty"`
 
-// Time in seconds allotted for averaging metrics.
-MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+	// Time in seconds allotted for averaging metrics.
+	MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
 
-// Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines.
-Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+	// Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines.
+	Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
 
-// Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should.
-StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+	// Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should.
+	StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
 
-// The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected.
-WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+	// The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected.
+	WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
 }
 
-
 type AutoscalingConfigParameters struct {
+	// Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used.
+	// +kubebuilder:validation:Optional
+	CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
 
-// Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used.
-// +kubebuilder:validation:Optional
-CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"`
-
-// Timeout to gracefully decommission nodes during downscaling. In seconds.
-// +kubebuilder:validation:Optional
-DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"`
+	// Timeout to gracefully decommission nodes during downscaling. In seconds.
+	// +kubebuilder:validation:Optional
+	DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"`
 
-// Maximum number of nodes in autoscaling subclusters.
-// +kubebuilder:validation:Optional
-MaxHostsCount *float64 `json:"maxHostsCount" tf:"max_hosts_count,omitempty"`
+	// Maximum number of nodes in autoscaling subclusters.
+	// +kubebuilder:validation:Optional
+	MaxHostsCount *float64 `json:"maxHostsCount" tf:"max_hosts_count,omitempty"`
 
-// Time in seconds allotted for averaging metrics.
-// +kubebuilder:validation:Optional
-MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
+	// Time in seconds allotted for averaging metrics.
+	// +kubebuilder:validation:Optional
+	MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"`
 
-// Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines.
-// +kubebuilder:validation:Optional
-Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+	// Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines.
+	// +kubebuilder:validation:Optional
+	Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
 
-// Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should.
-// +kubebuilder:validation:Optional
-StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
+	// Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should.
+	// +kubebuilder:validation:Optional
+	StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"`
 
-// The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected.
-// +kubebuilder:validation:Optional
-WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
+	// The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected.
+	// +kubebuilder:validation:Optional
+	WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"`
 }
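For the autoscaling block above, the duration fields are second counts encoded as strings, and `maxHostsCount` is the only member whose json tag lacks omitempty, which makes it the one effectively required knob. An illustrative in-package value (all numbers are examples, not provider defaults):

```go
package v1alpha1

func f64(v float64) *float64 { return &v }
func str(v string) *string   { return &v }
func yes(v bool) *bool       { return &v }

var exampleAutoscaling = AutoscalingConfigParameters{
	MaxHostsCount:         f64(10),    // json:"maxHostsCount": no omitempty, always serialized
	MeasurementDuration:   str("60"),  // seconds
	StabilizationDuration: str("120"), // seconds
	WarmupDuration:        str("60"),  // seconds
	Preemptible:           yes(true),
}
```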
 
-
 type ClusterConfigInitParameters struct {
+	// Data Proc specific options. The structure is documented below.
+	Hadoop []HadoopInitParameters `json:"hadoop,omitempty" tf:"hadoop,omitempty"`
 
-// Data Proc specific options. The structure is documented below.
-Hadoop []HadoopInitParameters `json:"hadoop,omitempty" tf:"hadoop,omitempty"`
+	// Configuration of the Data Proc subcluster. The structure is documented below.
+	SubclusterSpec []SubclusterSpecInitParameters `json:"subclusterSpec,omitempty" tf:"subcluster_spec,omitempty"`
 
-// Configuration of the Data Proc subcluster. The structure is documented below.
-SubclusterSpec []SubclusterSpecInitParameters `json:"subclusterSpec,omitempty" tf:"subcluster_spec,omitempty"`
-
-// Version of Data Proc image.
-VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"`
+	// Version of Data Proc image.
+	VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"`
 }
 
-
 type ClusterConfigObservation struct {
+	// Data Proc specific options. The structure is documented below.
+	Hadoop []HadoopObservation `json:"hadoop,omitempty" tf:"hadoop,omitempty"`
 
-// Data Proc specific options. The structure is documented below.
-Hadoop []HadoopObservation `json:"hadoop,omitempty" tf:"hadoop,omitempty"`
-
-// Configuration of the Data Proc subcluster. The structure is documented below.
-SubclusterSpec []SubclusterSpecObservation `json:"subclusterSpec,omitempty" tf:"subcluster_spec,omitempty"`
+	// Configuration of the Data Proc subcluster. The structure is documented below.
+	SubclusterSpec []SubclusterSpecObservation `json:"subclusterSpec,omitempty" tf:"subcluster_spec,omitempty"`
 
-// Version of Data Proc image.
-VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"`
+	// Version of Data Proc image.
+	VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"`
 }
 
-
 type ClusterConfigParameters struct {
+	// Data Proc specific options. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Hadoop []HadoopParameters `json:"hadoop,omitempty" tf:"hadoop,omitempty"`
 
-// Data Proc specific options. The structure is documented below.
-// +kubebuilder:validation:Optional
-Hadoop []HadoopParameters `json:"hadoop,omitempty" tf:"hadoop,omitempty"`
-
-// Configuration of the Data Proc subcluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-SubclusterSpec []SubclusterSpecParameters `json:"subclusterSpec" tf:"subcluster_spec,omitempty"`
+	// Configuration of the Data Proc subcluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	SubclusterSpec []SubclusterSpecParameters `json:"subclusterSpec" tf:"subcluster_spec,omitempty"`
 
-// Version of Data Proc image.
-// +kubebuilder:validation:Optional
-VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"`
+	// Version of Data Proc image.
+	// +kubebuilder:validation:Optional
+	VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"`
 }
 
-
 type ClusterInitParameters struct {
+	// Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false)
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
 
-// Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket
-// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false)
-Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+	// Reference to a Bucket in storage to populate bucket.
+	// +kubebuilder:validation:Optional
+	BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"`
 
-// Reference to a Bucket in storage to populate bucket.
-// +kubebuilder:validation:Optional
-BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"`
+	// Selector for a Bucket in storage to populate bucket.
+	// +kubebuilder:validation:Optional
+	BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"`
 
-// Selector for a Bucket in storage to populate bucket.
-// +kubebuilder:validation:Optional
-BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"`
+	// Configuration and resources for hosts that should be created with the cluster. The structure is documented below.
+	ClusterConfig []ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"`
 
-// Configuration and resources for hosts that should be created with the cluster. The structure is documented below.
-ClusterConfig []ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
 
-// Inhibits deletion of the cluster. Can be either true or false.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Description of the Data Proc cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Description of the Data Proc cluster.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// ID of the folder to create a cluster in. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// ID of the folder to create a cluster in. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
 
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
 
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// A list of host group IDs to place VMs of the cluster on.
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
 
-// A list of host group IDs to place VMs of the cluster on.
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+	// A set of key/value label pairs to assign to the Data Proc cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the Data Proc cluster.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Name of a specific Data Proc cluster.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Name of a specific Data Proc cluster.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// A list of security group IDs that the cluster belongs to.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
 
-// A list of security group IDs that the cluster belongs to.
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+	// Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 
-// Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
-// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// Reference to a ServiceAccount in iam to populate serviceAccountId.
+	// +kubebuilder:validation:Optional
+	ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
 
-// Reference to a ServiceAccount in iam to populate serviceAccountId.
-// +kubebuilder:validation:Optional
-ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+	// Selector for a ServiceAccount in iam to populate serviceAccountId.
+	// +kubebuilder:validation:Optional
+	ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
 
-// Selector for a ServiceAccount in iam to populate serviceAccountId.
-// +kubebuilder:validation:Optional
-ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+	// Whether to enable UI Proxy feature.
+	UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"`
 
-// Whether to enable UI Proxy feature.
-UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"`
-
-// ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used.
-ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+	// ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used.
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
 }
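Two different reference extractors appear in the struct above: `bucket` resolves via `ExtractParamPath("bucket", false)`, which copies the referenced Bucket's own `bucket` parameter, while `serviceAccountId` resolves via `ExtractResourceID()`, which copies the referenced ServiceAccount's external name (its cloud ID). A hypothetical in-package wiring of both styles (names and zone are made up):

```go
package v1alpha1

import v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

func zp(s string) *string { return &s }

var exampleClusterRefs = ClusterInitParameters{
	// ExtractParamPath("bucket", false): copy the referenced Bucket's
	// spec.forProvider.bucket value into this field.
	BucketRef: &v1.Reference{Name: "dataproc-job-logs"},
	// ExtractResourceID(): copy the ServiceAccount's external name
	// (its cloud ID) into serviceAccountId.
	ServiceAccountIDRef: &v1.Reference{Name: "dataproc-agent"},
	ZoneID:              zp("ru-central1-a"), // assumed example zone
}
```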
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // (Computed) The Data Proc cluster creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the Data Proc cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Data Proc cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// A list of host group IDs to place VMs of the cluster on. -// +listType=set -HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + // A list of host group IDs to place VMs of the cluster on. + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` -// (Computed) ID of a new Data Proc cluster. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // (Computed) ID of a new Data Proc cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Data Proc cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Data Proc cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of a specific Data Proc cluster. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of a specific Data Proc cluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// A list of security group IDs that the cluster belongs to. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A list of security group IDs that the cluster belongs to. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Whether to enable UI Proxy feature. -UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` + // Whether to enable UI Proxy feature. + UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` -// ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. 
-ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type ClusterParameters struct { - -// Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) -// +kubebuilder:validation:Optional -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - -// Reference to a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` - -// Selector for a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` - -// Configuration and resources for hosts that should be created with the cluster. The structure is documented below. -// +kubebuilder:validation:Optional -ClusterConfig []ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` - -// Inhibits deletion of the cluster. Can be either true or false. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - -// Description of the Data Proc cluster. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` - -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` - -// A list of host group IDs to place VMs of the cluster on. -// +kubebuilder:validation:Optional -// +listType=set -HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` - -// A set of key/value label pairs to assign to the Data Proc cluster. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// Name of a specific Data Proc cluster. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// A list of security group IDs that the cluster belongs to. -// +kubebuilder:validation:Optional -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` - -// Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` - -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` - -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` - -// Whether to enable UI Proxy feature. -// +kubebuilder:validation:Optional -UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` - -// ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. -// +kubebuilder:validation:Optional -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Configuration and resources for hosts that should be created with the cluster. The structure is documented below. + // +kubebuilder:validation:Optional + ClusterConfig []ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Data Proc cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of host group IDs to place VMs of the cluster on. 
+ // +kubebuilder:validation:Optional + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // A set of key/value label pairs to assign to the Data Proc cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of a specific Data Proc cluster. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of security group IDs that the cluster belongs to. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // Whether to enable UI Proxy feature. + // +kubebuilder:validation:Optional + UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` + + // ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type HadoopInitParameters struct { + // List of initialization scripts. The structure is documented below. + InitializationAction []InitializationActionInitParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` -// List of initialization scripts. The structure is documented below. -InitializationAction []InitializationActionInitParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` + // A set of key/value pairs that are used to configure cluster services. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` -// A set of key/value pairs that are used to configure cluster services. -// +mapType=granular -Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + // List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. + // +listType=set + SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` -// List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. -// +listType=set -SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` - -// List of services to run on Data Proc cluster. 
-// +listType=set -Services []*string `json:"services,omitempty" tf:"services,omitempty"` + // List of services to run on Data Proc cluster. + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` } - type HadoopObservation struct { + // List of initialization scripts. The structure is documented below. + InitializationAction []InitializationActionObservation `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` -// List of initialization scripts. The structure is documented below. -InitializationAction []InitializationActionObservation `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` - -// A set of key/value pairs that are used to configure cluster services. -// +mapType=granular -Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + // A set of key/value pairs that are used to configure cluster services. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` -// List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. -// +listType=set -SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` + // List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. + // +listType=set + SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` -// List of services to run on Data Proc cluster. -// +listType=set -Services []*string `json:"services,omitempty" tf:"services,omitempty"` + // List of services to run on Data Proc cluster. + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` } - type HadoopParameters struct { + // List of initialization scripts. The structure is documented below. + // +kubebuilder:validation:Optional + InitializationAction []InitializationActionParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` -// List of initialization scripts. The structure is documented below. -// +kubebuilder:validation:Optional -InitializationAction []InitializationActionParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` - -// A set of key/value pairs that are used to configure cluster services. -// +kubebuilder:validation:Optional -// +mapType=granular -Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + // A set of key/value pairs that are used to configure cluster services. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` -// List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. -// +kubebuilder:validation:Optional -// +listType=set -SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` + // List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. + // +kubebuilder:validation:Optional + // +listType=set + SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` -// List of services to run on Data Proc cluster. 
-// +kubebuilder:validation:Optional -// +listType=set -Services []*string `json:"services,omitempty" tf:"services,omitempty"` + // List of services to run on Data Proc cluster. + // +kubebuilder:validation:Optional + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` } - type InitializationActionInitParameters struct { + // List of arguments of the initialization script. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` -// List of arguments of the initialization script. -Args []*string `json:"args,omitempty" tf:"args,omitempty"` + // Script execution timeout, in seconds. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Script execution timeout, in seconds. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` - -// Script URI. -URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + // Script URI. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } - type InitializationActionObservation struct { + // List of arguments of the initialization script. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` -// List of arguments of the initialization script. -Args []*string `json:"args,omitempty" tf:"args,omitempty"` - -// Script execution timeout, in seconds. -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Script execution timeout, in seconds. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Script URI. -URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + // Script URI. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } - type InitializationActionParameters struct { + // List of arguments of the initialization script. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` -// List of arguments of the initialization script. -// +kubebuilder:validation:Optional -Args []*string `json:"args,omitempty" tf:"args,omitempty"` - -// Script execution timeout, in seconds. -// +kubebuilder:validation:Optional -Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Script execution timeout, in seconds. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Script URI. -// +kubebuilder:validation:Optional -URI *string `json:"uri" tf:"uri,omitempty"` + // Script URI. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` } - type ResourcesInitParameters struct { + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + // Type of the storage of a host. One of network-hdd (default) or network-ssd. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// Type of the storage of a host. One of network-hdd (default) or network-ssd. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -// The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + // The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. 
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type ResourcesObservation struct { + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of a host. One of network-hdd (default) or network-ssd. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of a host. One of network-hdd (default) or network-ssd. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + // The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type ResourcesParameters struct { + // Volume of the storage available to a host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` - -// Type of the storage of a host. One of network-hdd (default) or network-ssd. -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of a host. One of network-hdd (default) or network-ssd. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` + // The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` } - type SubclusterSpecInitParameters struct { + // If true then assign public IP addresses to the hosts of the subclusters. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// If true then assign public IP addresses to the hosts of the subclusters. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // Autoscaling configuration for compute subclusters. + AutoscalingConfig []AutoscalingConfigInitParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` -// Autoscaling configuration for compute subclusters. -AutoscalingConfig []AutoscalingConfigInitParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + // Number of hosts within Data Proc subcluster. + HostsCount *float64 `json:"hostsCount,omitempty" tf:"hosts_count,omitempty"` -// Number of hosts within Data Proc subcluster. -HostsCount *float64 `json:"hostsCount,omitempty" tf:"hosts_count,omitempty"` + // Name of the Data Proc subcluster. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the Data Proc subcluster. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Resources allocated to each host of the Data Proc subcluster. The structure is documented below. + Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` -// Resources allocated to each host of the Data Proc subcluster. The structure is documented below. -Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Role of the subcluster in the Data Proc cluster. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// Role of the subcluster in the Data Proc cluster. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type SubclusterSpecObservation struct { + // If true then assign public IP addresses to the hosts of the subclusters. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// If true then assign public IP addresses to the hosts of the subclusters. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` - -// Autoscaling configuration for compute subclusters. -AutoscalingConfig []AutoscalingConfigObservation `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + // Autoscaling configuration for compute subclusters. + AutoscalingConfig []AutoscalingConfigObservation `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` -// Number of hosts within Data Proc subcluster. -HostsCount *float64 `json:"hostsCount,omitempty" tf:"hosts_count,omitempty"` + // Number of hosts within Data Proc subcluster. + HostsCount *float64 `json:"hostsCount,omitempty" tf:"hosts_count,omitempty"` -// (Computed) ID of a new Data Proc cluster. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // (Computed) ID of a new Data Proc cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Name of the Data Proc subcluster. 
-Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Data Proc subcluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Resources allocated to each host of the Data Proc subcluster. The structure is documented below. -Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to each host of the Data Proc subcluster. The structure is documented below. + Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` -// Role of the subcluster in the Data Proc cluster. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // Role of the subcluster in the Data Proc cluster. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type SubclusterSpecParameters struct { + // If true then assign public IP addresses to the hosts of the subclusters. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// If true then assign public IP addresses to the hosts of the subclusters. -// +kubebuilder:validation:Optional -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` - -// Autoscaling configuration for compute subclusters. -// +kubebuilder:validation:Optional -AutoscalingConfig []AutoscalingConfigParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + // Autoscaling configuration for compute subclusters. + // +kubebuilder:validation:Optional + AutoscalingConfig []AutoscalingConfigParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` -// Number of hosts within Data Proc subcluster. -// +kubebuilder:validation:Optional -HostsCount *float64 `json:"hostsCount" tf:"hosts_count,omitempty"` + // Number of hosts within Data Proc subcluster. + // +kubebuilder:validation:Optional + HostsCount *float64 `json:"hostsCount" tf:"hosts_count,omitempty"` -// Name of the Data Proc subcluster. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Name of the Data Proc subcluster. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Resources allocated to each host of the Data Proc subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -Resources []ResourcesParameters `json:"resources" tf:"resources,omitempty"` + // Resources allocated to each host of the Data Proc subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []ResourcesParameters `json:"resources" tf:"resources,omitempty"` -// Role of the subcluster in the Data Proc cluster. -// +kubebuilder:validation:Optional -Role *string `json:"role" tf:"role,omitempty"` + // Role of the subcluster in the Data Proc cluster. + // +kubebuilder:validation:Optional + Role *string `json:"role" tf:"role,omitempty"` -// The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } // ClusterSpec defines the desired state of Cluster type ClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ClusterParameters `json:"forProvider"` + ForProvider ClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -628,20 +581,19 @@ type ClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider ClusterInitParameters `json:"initProvider,omitempty"` + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` } // ClusterStatus defines the observed state of Cluster. type ClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ClusterObservation `json:"atProvider,omitempty"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Cluster is the Schema for the Clusters API. Manages a Data Proc cluster within Yandex.Cloud. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -651,10 +603,10 @@ type ClusterStatus struct { type Cluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterConfig) || (has(self.initProvider) && has(self.initProvider.clusterConfig))",message="spec.forProvider.clusterConfig is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec ClusterSpec `json:"spec"` - Status ClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterConfig) || (has(self.initProvider) && has(self.initProvider.clusterConfig))",message="spec.forProvider.clusterConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go b/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go index 1f5383a..aecfbdf 100755 --- a/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go @@ -1,10 +1,6 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 - - // Hub marks this type as a conversion hub. - func (tr *Cluster) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} diff --git a/apis/dataproc/v1alpha1/zz_generated.deepcopy.go b/apis/dataproc/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b0b9dd1 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1291 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingConfigInitParameters) DeepCopyInto(out *AutoscalingConfigInitParameters) { + *out = *in + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(string) + **out = **in + } + if in.DecommissionTimeout != nil { + in, out := &in.DecommissionTimeout, &out.DecommissionTimeout + *out = new(string) + **out = **in + } + if in.MaxHostsCount != nil { + in, out := &in.MaxHostsCount, &out.MaxHostsCount + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigInitParameters. +func (in *AutoscalingConfigInitParameters) DeepCopy() *AutoscalingConfigInitParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingConfigObservation) DeepCopyInto(out *AutoscalingConfigObservation) { + *out = *in + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(string) + **out = **in + } + if in.DecommissionTimeout != nil { + in, out := &in.DecommissionTimeout, &out.DecommissionTimeout + *out = new(string) + **out = **in + } + if in.MaxHostsCount != nil { + in, out := &in.MaxHostsCount, &out.MaxHostsCount + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigObservation. +func (in *AutoscalingConfigObservation) DeepCopy() *AutoscalingConfigObservation { + if in == nil { + return nil + } + out := new(AutoscalingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingConfigParameters) DeepCopyInto(out *AutoscalingConfigParameters) { + *out = *in + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(string) + **out = **in + } + if in.DecommissionTimeout != nil { + in, out := &in.DecommissionTimeout, &out.DecommissionTimeout + *out = new(string) + **out = **in + } + if in.MaxHostsCount != nil { + in, out := &in.MaxHostsCount, &out.MaxHostsCount + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigParameters. +func (in *AutoscalingConfigParameters) DeepCopy() *AutoscalingConfigParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigInitParameters) DeepCopyInto(out *ClusterConfigInitParameters) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = make([]HadoopInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubclusterSpec != nil { + in, out := &in.SubclusterSpec, &out.SubclusterSpec + *out = make([]SubclusterSpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigInitParameters. +func (in *ClusterConfigInitParameters) DeepCopy() *ClusterConfigInitParameters { + if in == nil { + return nil + } + out := new(ClusterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = make([]HadoopObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubclusterSpec != nil { + in, out := &in.SubclusterSpec, &out.SubclusterSpec + *out = make([]SubclusterSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigObservation. +func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = make([]HadoopParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubclusterSpec != nil { + in, out := &in.SubclusterSpec, &out.SubclusterSpec + *out = make([]SubclusterSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigParameters. +func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UIProxy != nil { + in, out := &in.UIProxy, &out.UIProxy + *out = new(bool) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.UIProxy != nil { + in, out := &in.UIProxy, &out.UIProxy + *out = new(bool) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UIProxy != nil { + in, out := &in.UIProxy, &out.UIProxy + *out = new(bool) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopInitParameters) DeepCopyInto(out *HadoopInitParameters) { + *out = *in + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SSHPublicKeys != nil { + in, out := &in.SSHPublicKeys, &out.SSHPublicKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopInitParameters. +func (in *HadoopInitParameters) DeepCopy() *HadoopInitParameters { + if in == nil { + return nil + } + out := new(HadoopInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopObservation) DeepCopyInto(out *HadoopObservation) { + *out = *in + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SSHPublicKeys != nil { + in, out := &in.SSHPublicKeys, &out.SSHPublicKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopObservation. +func (in *HadoopObservation) DeepCopy() *HadoopObservation { + if in == nil { + return nil + } + out := new(HadoopObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopParameters) DeepCopyInto(out *HadoopParameters) { + *out = *in + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SSHPublicKeys != nil { + in, out := &in.SSHPublicKeys, &out.SSHPublicKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopParameters. +func (in *HadoopParameters) DeepCopy() *HadoopParameters { + if in == nil { + return nil + } + out := new(HadoopParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitializationActionInitParameters) DeepCopyInto(out *InitializationActionInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionInitParameters. +func (in *InitializationActionInitParameters) DeepCopy() *InitializationActionInitParameters { + if in == nil { + return nil + } + out := new(InitializationActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionObservation) DeepCopyInto(out *InitializationActionObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionObservation. +func (in *InitializationActionObservation) DeepCopy() *InitializationActionObservation { + if in == nil { + return nil + } + out := new(InitializationActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionParameters) DeepCopyInto(out *InitializationActionParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionParameters. +func (in *InitializationActionParameters) DeepCopy() *InitializationActionParameters { + if in == nil { + return nil + } + out := new(InitializationActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. 
+func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubclusterSpecInitParameters) DeepCopyInto(out *SubclusterSpecInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostsCount != nil { + in, out := &in.HostsCount, &out.HostsCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubclusterSpecInitParameters. 
+func (in *SubclusterSpecInitParameters) DeepCopy() *SubclusterSpecInitParameters { + if in == nil { + return nil + } + out := new(SubclusterSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubclusterSpecObservation) DeepCopyInto(out *SubclusterSpecObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostsCount != nil { + in, out := &in.HostsCount, &out.HostsCount + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubclusterSpecObservation. +func (in *SubclusterSpecObservation) DeepCopy() *SubclusterSpecObservation { + if in == nil { + return nil + } + out := new(SubclusterSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubclusterSpecParameters) DeepCopyInto(out *SubclusterSpecParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostsCount != nil { + in, out := &in.HostsCount, &out.HostsCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubclusterSpecParameters. 
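// DeepCopy, unlike DeepCopyInto, also allocates the destination, which is
// what controller code typically calls before mutating an object that came
// from a shared informer cache. A short usage sketch against
// SubclusterSpecParameters (the literal values are made up):
//
//	name := "driver"
//	orig := &SubclusterSpecParameters{Name: &name}
//	clone := orig.DeepCopy()
//	*clone.Name = "worker"
//	// *orig.Name is still "driver": the clone owns its own memory.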
+func (in *SubclusterSpecParameters) DeepCopy() *SubclusterSpecParameters { + if in == nil { + return nil + } + out := new(SubclusterSpecParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dataproc/v1alpha1/zz_generated.resolvers.go b/apis/dataproc/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..03f9705 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,161 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha13 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Cluster. +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{ + List: &v1alpha1.BucketList{}, + Managed: &v1alpha1.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ClusterConfig); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID") + } + mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha12.FolderList{}, + Managed: &v1alpha12.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha13.ServiceAccountList{}, + Managed: &v1alpha13.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{ + List: &v1alpha1.BucketList{}, + Managed: &v1alpha1.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.ClusterConfig); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID") + } + mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha12.FolderList{}, + Managed: &v1alpha12.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha13.ServiceAccountList{}, + Managed: &v1alpha13.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dataproc/v1alpha1/zz_groupversion_info.go b/apis/dataproc/v1alpha1/zz_groupversion_info.go index ef1dbf4..b03faf4 100755 --- a/apis/dataproc/v1alpha1/zz_groupversion_info.go +++ b/apis/dataproc/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
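// ResolveReferences above repeats one recipe per referenceable field: take
// the field's current value, an optional Reference (the explicit name of
// another managed resource), and an optional Selector (label matching), ask
// the reference.APIResolver for a concrete value, then write both the
// resolved value and the winning reference back into the spec. A condensed
// sketch of that recipe for a single hypothetical field (Widget and
// WidgetList stand in for a real managed type):
//
//	rsp, err := r.Resolve(ctx, reference.ResolutionRequest{
//		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WidgetID),
//		Extract:      reference.ExternalName(), // or resource.ExtractResourceID()
//		Reference:    mg.Spec.ForProvider.WidgetIDRef,
//		Selector:     mg.Spec.ForProvider.WidgetIDSelector,
//		To:           reference.To{List: &WidgetList{}, Managed: &Widget{}},
//	})
//	if err != nil {
//		return errors.Wrap(err, "mg.Spec.ForProvider.WidgetID")
//	}
//	mg.Spec.ForProvider.WidgetID = reference.ToPtrValue(rsp.ResolvedValue)
//	mg.Spec.ForProvider.WidgetIDRef = rsp.ResolvedReference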
// +kubebuilder:object:generate=true diff --git a/apis/datatransfer/v1alpha1/zz_endpoint_terraformed.go b/apis/datatransfer/v1alpha1/zz_endpoint_terraformed.go index f6df87b..38f582f 100755 --- a/apis/datatransfer/v1alpha1/zz_endpoint_terraformed.go +++ b/apis/datatransfer/v1alpha1/zz_endpoint_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Endpoint func (mg *Endpoint) GetTerraformResourceType() string { - return "yandex_datatransfer_endpoint" + return "yandex_datatransfer_endpoint" } // GetConnectionDetailsMapping for this Endpoint func (tr *Endpoint) GetConnectionDetailsMapping() map[string]string { - return map[string]string{ "settings[*].clickhouse_source[*].connection[*].connection_options[*].password[*].raw": "settings[*].clickhouseSource[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].clickhouse_target[*].connection[*].connection_options[*].password[*].raw": "settings[*].clickhouseTarget[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].kafka_source[*].auth[*].sasl[*].password[*].raw": "settings[*].kafkaSource[*].auth[*].sasl[*].password[*].rawSecretRef", "settings[*].kafka_target[*].auth[*].sasl[*].password[*].raw": "settings[*].kafkaTarget[*].auth[*].sasl[*].password[*].rawSecretRef", "settings[*].metrika_source[*].token[*].raw": "settings[*].metrikaSource[*].token[*].rawSecretRef", "settings[*].mongo_source[*].connection[*].connection_options[*].password[*].raw": "settings[*].mongoSource[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].mongo_target[*].connection[*].connection_options[*].password[*].raw": "settings[*].mongoTarget[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].mysql_source[*].password[*].raw": "settings[*].mysqlSource[*].password[*].rawSecretRef", "settings[*].mysql_target[*].password[*].raw": "settings[*].mysqlTarget[*].password[*].rawSecretRef", "settings[*].postgres_source[*].password[*].raw": "settings[*].postgresSource[*].password[*].rawSecretRef", "settings[*].postgres_target[*].password[*].raw": "settings[*].postgresTarget[*].password[*].rawSecretRef", "settings[*].ydb_source[*].sa_key_content": "settings[*].ydbSource[*].saKeyContentSecretRef", "settings[*].ydb_target[*].sa_key_content": "settings[*].ydbTarget[*].saKeyContentSecretRef", } + return map[string]string{"settings[*].clickhouse_source[*].connection[*].connection_options[*].password[*].raw": "settings[*].clickhouseSource[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].clickhouse_target[*].connection[*].connection_options[*].password[*].raw": "settings[*].clickhouseTarget[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].kafka_source[*].auth[*].sasl[*].password[*].raw": "settings[*].kafkaSource[*].auth[*].sasl[*].password[*].rawSecretRef", "settings[*].kafka_target[*].auth[*].sasl[*].password[*].raw": "settings[*].kafkaTarget[*].auth[*].sasl[*].password[*].rawSecretRef", "settings[*].metrika_source[*].token[*].raw": "settings[*].metrikaSource[*].token[*].rawSecretRef", "settings[*].mongo_source[*].connection[*].connection_options[*].password[*].raw": "settings[*].mongoSource[*].connection[*].connectionOptions[*].password[*].rawSecretRef", 
"settings[*].mongo_target[*].connection[*].connection_options[*].password[*].raw": "settings[*].mongoTarget[*].connection[*].connectionOptions[*].password[*].rawSecretRef", "settings[*].mysql_source[*].password[*].raw": "settings[*].mysqlSource[*].password[*].rawSecretRef", "settings[*].mysql_target[*].password[*].raw": "settings[*].mysqlTarget[*].password[*].rawSecretRef", "settings[*].postgres_source[*].password[*].raw": "settings[*].postgresSource[*].password[*].rawSecretRef", "settings[*].postgres_target[*].password[*].raw": "settings[*].postgresTarget[*].password[*].rawSecretRef", "settings[*].ydb_source[*].sa_key_content": "settings[*].ydbSource[*].saKeyContentSecretRef", "settings[*].ydb_target[*].sa_key_content": "settings[*].ydbTarget[*].saKeyContentSecretRef"} } // GetObservation of this Endpoint func (tr *Endpoint) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Endpoint func (tr *Endpoint) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Endpoint func (tr *Endpoint) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Endpoint func (tr *Endpoint) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Endpoint func (tr *Endpoint) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Endpoint func (tr *Endpoint) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Endpoint func (tr *Endpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return 
nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Endpoint using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Endpoint) LateInitialize(attrs []byte) (bool, error) { - params := &EndpointParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &EndpointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Endpoint) GetTerraformSchemaVersion() int { - return 1 + return 1 } diff --git a/apis/datatransfer/v1alpha1/zz_endpoint_types.go b/apis/datatransfer/v1alpha1/zz_endpoint_types.go index 04e8511..4672956 100755 --- a/apis/datatransfer/v1alpha1/zz_endpoint_types.go +++ b/apis/datatransfer/v1alpha1/zz_endpoint_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
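// GetMergedParameters above layers spec.initProvider underneath
// spec.forProvider: mergo fills in only the keys that forProvider is missing,
// because the config callback resets Overwrite to false after
// mergo.WithSliceDeepCopy has flipped it to true. A small self-contained
// illustration of that behaviour on plain maps (the keys and values are made
// up):
//
//	params := map[string]any{"name": "explicit"}
//	initParams := map[string]any{
//		"name":   "default",
//		"labels": map[string]any{"env": "dev"},
//	}
//	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
//		c.Overwrite = false
//	})
//	// err == nil; params["name"] is still "explicit", and params["labels"]
//	// was filled in from initParams.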
package v1alpha1 @@ -9,6334 +7,5503 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AltNamesInitParameters struct { + // (String) + FromName *string `json:"fromName,omitempty" tf:"from_name,omitempty"` -// (String) -FromName *string `json:"fromName,omitempty" tf:"from_name,omitempty"` - -// (String) -ToName *string `json:"toName,omitempty" tf:"to_name,omitempty"` + // (String) + ToName *string `json:"toName,omitempty" tf:"to_name,omitempty"` } - type AltNamesObservation struct { + // (String) + FromName *string `json:"fromName,omitempty" tf:"from_name,omitempty"` -// (String) -FromName *string `json:"fromName,omitempty" tf:"from_name,omitempty"` - -// (String) -ToName *string `json:"toName,omitempty" tf:"to_name,omitempty"` + // (String) + ToName *string `json:"toName,omitempty" tf:"to_name,omitempty"` } - type AltNamesParameters struct { + // (String) + // +kubebuilder:validation:Optional + FromName *string `json:"fromName,omitempty" tf:"from_name,omitempty"` -// (String) -// +kubebuilder:validation:Optional -FromName *string `json:"fromName,omitempty" tf:"from_name,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -ToName *string `json:"toName,omitempty" tf:"to_name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + ToName *string `json:"toName,omitempty" tf:"to_name,omitempty"` } - type AuditTrailsV1ParserInitParameters struct { - } - type AuditTrailsV1ParserObservation struct { - } - type AuditTrailsV1ParserParameters struct { - } - type AuthInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + NoAuth []NoAuthInitParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -NoAuth []NoAuthInitParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Sasl []SaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Sasl []SaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` } - type AuthNoAuthInitParameters struct { - } - type AuthNoAuthObservation struct { - } - type AuthNoAuthParameters struct { - } - type AuthObservation struct { + // (Block List, Max: 1) (see below for nested schema) + NoAuth []NoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -NoAuth []NoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Sasl []SaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Sasl []SaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"` } - type AuthParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + NoAuth []NoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -NoAuth []NoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Sasl []SaslParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Sasl []SaslParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` } - type AuthSaslInitParameters struct { + // 
(String) + Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"` -// (String) -Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Password []AuthSaslPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []AuthSaslPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` -// (String) -User *string `json:"user,omitempty" tf:"user,omitempty"` + // (String) + User *string `json:"user,omitempty" tf:"user,omitempty"` } - type AuthSaslObservation struct { + // (String) + Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"` -// (String) -Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Password []AuthSaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []AuthSaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (String) -User *string `json:"user,omitempty" tf:"user,omitempty"` + // (String) + User *string `json:"user,omitempty" tf:"user,omitempty"` } - type AuthSaslParameters struct { + // (String) + // +kubebuilder:validation:Optional + Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Password []AuthSaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Password []AuthSaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -User *string `json:"user,omitempty" tf:"user,omitempty"` + // (String) + // +kubebuilder:validation:Optional + User *string `json:"user,omitempty" tf:"user,omitempty"` } - type AuthSaslPasswordInitParameters struct { - -// (String, Sensitive) -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type AuthSaslPasswordObservation struct { - } - type AuthSaslPasswordParameters struct { - -// (String, Sensitive) -// +kubebuilder:validation:Optional -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + // +kubebuilder:validation:Optional + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type ClickhouseSourceInitParameters struct { + // (String) + ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` -// (String) -ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Connection []ConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Connection []ConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (List of String) -ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` + // (List of String) + ExcludeTables []*string 
`json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` -// (List of String) -IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` + // (List of String) + IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` -// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type ClickhouseSourceObservation struct { + // (String) + ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` -// (String) -ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Connection []ConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []ConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` + // (List of String) + ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` -// (List of String) -ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` + // (List of String) + IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` -// (List of String) -IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` - -// (String) -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // (String) + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type ClickhouseSourceParameters struct { + // (String) + // +kubebuilder:validation:Optional + ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Connection []ConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) 
(see below for nested schema) -// +kubebuilder:validation:Optional -Connection []ConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. 
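// The SubnetID / SubnetIDRef / SubnetIDSelector triple here is an either/or
// API: set the ID directly, name another managed Subnet, or match one by
// labels and let ResolveReferences fill the ID in. A hypothetical in-Go
// construction using the selector path (literal values are made up):
//
//	cluster := "prod-clickhouse"
//	src := ClickhouseSourceInitParameters{
//		ClickhouseClusterName: &cluster,
//		SubnetIDSelector: &v1.Selector{
//			MatchLabels: map[string]string{"team": "data"},
//		},
//	}
//	_ = src // SubnetID stays nil until the reference resolver populates it.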
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type ClickhouseTargetConnectionInitParameters struct { - -// (Block List, Max: 1) (see below for nested schema) -ConnectionOptions []ConnectionConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ConnectionOptions []ConnectionConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"` } - type ClickhouseTargetConnectionObservation struct { - -// (Block List, Max: 1) (see below for nested schema) -ConnectionOptions []ConnectionConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ConnectionOptions []ConnectionConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"` } - type ClickhouseTargetConnectionParameters struct { - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -ConnectionOptions []ConnectionConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + ConnectionOptions []ConnectionConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"` } - type ClickhouseTargetInitParameters struct { + // (Block List) (see below for nested schema) + AltNames []AltNamesInitParameters `json:"altNames,omitempty" tf:"alt_names,omitempty"` -// (Block List) (see below for nested schema) -AltNames []AltNamesInitParameters `json:"altNames,omitempty" tf:"alt_names,omitempty"` + // (String) + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// (String) -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // (String) + ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` -// (String) -ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Connection []ClickhouseTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []ClickhouseTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Sharding []ShardingInitParameters `json:"sharding,omitempty" tf:"sharding,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Sharding []ShardingInitParameters `json:"sharding,omitempty" tf:"sharding,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type ClickhouseTargetObservation struct { + // (Block List) (see below for nested schema) + AltNames []AltNamesObservation `json:"altNames,omitempty" tf:"alt_names,omitempty"` -// (Block List) (see below for nested schema) -AltNames []AltNamesObservation `json:"altNames,omitempty" tf:"alt_names,omitempty"` + // (String) + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// (String) -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // (String) + ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` -// (String) -ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Connection []ClickhouseTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []ClickhouseTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Sharding []ShardingObservation `json:"sharding,omitempty" tf:"sharding,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Sharding []ShardingObservation `json:"sharding,omitempty" tf:"sharding,omitempty"` - -// (String) -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // (String) + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type ClickhouseTargetParameters struct { + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + AltNames []AltNamesParameters `json:"altNames,omitempty" tf:"alt_names,omitempty"` -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -AltNames []AltNamesParameters `json:"altNames,omitempty" tf:"alt_names,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + ClickhouseClusterName *string `json:"clickhouseClusterName,omitempty" tf:"clickhouse_cluster_name,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Connection []ClickhouseTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` + 
// (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Connection []ClickhouseTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Sharding []ShardingParameters `json:"sharding,omitempty" tf:"sharding,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Sharding []ShardingParameters `json:"sharding,omitempty" tf:"sharding,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. 
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
 }

-
 type CloudLoggingParserInitParameters struct {
-
 }

-
 type CloudLoggingParserObservation struct {
-
 }

-
 type CloudLoggingParserParameters struct {
-
 }

-
 type CollectionsInitParameters struct {
+	// (String)
+	CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`

-// (String)
-CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`
-
-// (String)
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// (String)
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }

-
 type CollectionsObservation struct {
+	// (String)
+	CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`

-// (String)
-CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`
-
-// (String)
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// (String)
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }

-
 type CollectionsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }

-
 type ColumnValueHashInitParameters struct {
-
-// (String)
-ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
+	// (String)
+	ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
 }

-
 type ColumnValueHashObservation struct {
-
-// (String)
-ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
+	// (String)
+	ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
 }

-
 type ColumnValueHashParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
 }

-
 type ColumnValueInitParameters struct {
-
-// (String)
-StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"`
+	// (String)
+	StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"`
 }

-
 type ColumnValueObservation struct {
-
-// (String)
-StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"`
+	// (String)
+	StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"`
 }

-
 type ColumnValueParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"`
 }

-
 type ConnectionConnectionOptionsInitParameters struct {
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`

-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`

-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []ConnectionOptionsOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []ConnectionOptionsOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []ConnectionOptionsPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Password []ConnectionOptionsPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type ConnectionConnectionOptionsObservation struct {
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`

-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`

-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []ConnectionOptionsOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []ConnectionOptionsOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []ConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Password []ConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseInitParameters struct {
+	// (List of String)
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`

-// (List of String)
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-
-// (Number)
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (Number)
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

-// (String)
-ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
+	// (String)
+	ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []ConnectionOptionsOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []ConnectionOptionsOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseObservation struct {
+	// (List of String)
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`

-// (List of String)
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-
-// (Number)
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (Number)
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

-// (String)
-ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
+	// (String)
+	ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []ConnectionOptionsOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []ConnectionOptionsOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`

-// (List of String)
-// +kubebuilder:validation:Optional
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-
-// (Number)
-// +kubebuilder:validation:Optional
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TLSMode []ConnectionOptionsOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TLSMode []ConnectionOptionsOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters struct {
-
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation struct {
-
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters struct {
-
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Disabled []ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Disabled []ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionConnectionOptionsOnPremiseTLSModeParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Disabled []ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Disabled []ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Enabled []ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Enabled []ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionConnectionOptionsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []ConnectionOptionsOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []ConnectionOptionsOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Password []ConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Password []ConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type ConnectionConnectionOptionsPasswordInitParameters struct {
-
-// (String, Sensitive)
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }

-
 type ConnectionConnectionOptionsPasswordObservation struct {
-
 }

-
 type ConnectionConnectionOptionsPasswordParameters struct {
-
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }

-
 type ConnectionInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-ConnectionOptions []ConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ConnectionOptions []ConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }

-
 type ConnectionObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-ConnectionOptions []ConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ConnectionOptions []ConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }

-
 type ConnectionOnPremiseInitParameters struct {
+	// (List of String)
+	BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`

-// (List of String)
-BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`

-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []ConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []ConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionOnPremiseObservation struct {
+	// (List of String)
+	BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`

-// (List of String)
-BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`

-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []ConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []ConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionOnPremiseParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`

-// (List of String)
-// +kubebuilder:validation:Optional
-BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TLSMode []ConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TLSMode []ConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionOnPremiseTLSModeDisabledInitParameters struct {
-
 }

-
 type ConnectionOnPremiseTLSModeDisabledObservation struct {
-
 }

-
 type ConnectionOnPremiseTLSModeDisabledParameters struct {
-
 }

-
 type ConnectionOnPremiseTLSModeEnabledInitParameters struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionOnPremiseTLSModeEnabledObservation struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionOnPremiseTLSModeEnabledParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionOnPremiseTLSModeInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []OnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Disabled []OnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []OnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []OnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionOnPremiseTLSModeObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []OnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Disabled []OnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []OnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []OnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionOnPremiseTLSModeParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Disabled []OnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Disabled []OnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Enabled []OnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Enabled []OnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionOptionsInitParameters struct {
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`

-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
-
-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []OnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []OnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Password []PasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []PasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`

-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type ConnectionOptionsObservation struct {
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`

-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
-
-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []OnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []OnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Password []PasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []PasswordParameters `json:"password,omitempty" tf:"password,omitempty"`

-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseInitParameters struct {
+	// (Number)
+	HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"`

-// (Number)
-HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"`
-
-// (Number)
-NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"`
+	// (Number)
+	NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"`

-// (Block List) (see below for nested schema)
-Shards []OnPremiseShardsInitParameters `json:"shards,omitempty" tf:"shards,omitempty"`
+	// (Block List) (see below for nested schema)
+	Shards []OnPremiseShardsInitParameters `json:"shards,omitempty" tf:"shards,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []OnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []OnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseObservation struct {
+	// (Number)
+	HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"`

-// (Number)
-HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"`
+	// (Number)
+	NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"`

-// (Number)
-NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"`
+	// (Block List) (see below for nested schema)
+	Shards []OnPremiseShardsObservation `json:"shards,omitempty" tf:"shards,omitempty"`

-// (Block List) (see below for nested schema)
-Shards []OnPremiseShardsObservation `json:"shards,omitempty" tf:"shards,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []OnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []OnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseParameters struct {
+	// (Number)
+	// +kubebuilder:validation:Optional
+	HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"`

-// (Number)
-// +kubebuilder:validation:Optional
-HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"`

-// (Number)
-// +kubebuilder:validation:Optional
-NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Shards []OnPremiseShardsParameters `json:"shards,omitempty" tf:"shards,omitempty"`

-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Shards []OnPremiseShardsParameters `json:"shards,omitempty" tf:"shards,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TLSMode []OnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TLSMode []OnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseTLSModeDisabledInitParameters struct {
-
 }

-
 type ConnectionOptionsOnPremiseTLSModeDisabledObservation struct {
-
 }

-
 type ConnectionOptionsOnPremiseTLSModeDisabledParameters struct {
-
 }

-
 type ConnectionOptionsOnPremiseTLSModeEnabledInitParameters struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseTLSModeEnabledObservation struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseTLSModeEnabledParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseTLSModeInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []ConnectionOptionsOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Disabled []ConnectionOptionsOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []ConnectionOptionsOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []ConnectionOptionsOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseTLSModeObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []ConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Disabled []ConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []ConnectionOptionsOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []ConnectionOptionsOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionOptionsOnPremiseTLSModeParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Disabled []ConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Disabled []ConnectionOptionsOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Enabled []ConnectionOptionsOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Enabled []ConnectionOptionsOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }

-
 type ConnectionOptionsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []OnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []OnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Password []PasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Password []PasswordParameters `json:"password,omitempty" tf:"password,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type ConnectionOptionsPasswordInitParameters struct {
-
-// (String, Sensitive)
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }

-
 type ConnectionOptionsPasswordObservation struct {
-
 }

-
 type ConnectionOptionsPasswordParameters struct {
-
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }

-
 type ConnectionParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ConnectionOptions []ConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ConnectionOptions []ConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }

-
 type CustomMappingInitParameters struct {
+	// (String)
+	ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`

-// (String)
-ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
-
-// (Block List) (see below for nested schema)
-Mapping []MappingInitParameters `json:"mapping,omitempty" tf:"mapping,omitempty"`
+	// (Block List) (see below for nested schema)
+	Mapping []MappingInitParameters `json:"mapping,omitempty" tf:"mapping,omitempty"`
 }

-
 type CustomMappingObservation struct {
+	// (String)
+	ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`

-// (String)
-ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
-
-// (Block List) (see below for nested schema)
-Mapping []MappingObservation `json:"mapping,omitempty" tf:"mapping,omitempty"`
+	// (Block List) (see below for nested schema)
+	Mapping []MappingObservation `json:"mapping,omitempty" tf:"mapping,omitempty"`
 }

-
 type CustomMappingParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"`
-
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Mapping []MappingParameters `json:"mapping,omitempty" tf:"mapping,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Mapping []MappingParameters `json:"mapping,omitempty" tf:"mapping,omitempty"`
 }

-
 type DataSchemaFieldsFieldsInitParameters struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-
-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type DataSchemaFieldsFieldsObservation struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type DataSchemaFieldsFieldsParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type DataSchemaFieldsInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-Fields []DataSchemaFieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []DataSchemaFieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type DataSchemaFieldsObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-Fields []DataSchemaFieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []DataSchemaFieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type DataSchemaFieldsParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Fields []DataSchemaFieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Fields []DataSchemaFieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type DataSchemaInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []FieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Fields []FieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
-
-// (String)
-JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
+	// (String)
+	JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
 }

-
 type DataSchemaObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []FieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Fields []FieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
-
-// (String)
-JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
+	// (String)
+	JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
 }

-
 type DataSchemaParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Fields []FieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Fields []FieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
 }

-
 type DisabledInitParameters struct {
-
 }

-
 type DisabledObservation struct {
-
 }

-
 type DisabledParameters struct {
-
 }

-
 type EnabledInitParameters struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type EnabledObservation struct {
-
-// (String)
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type EnabledParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"`
 }

-
 type EndpointInitParameters struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`

-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`

-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`

-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// (Map of String)
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

-// (Map of String)
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"`
 }

-
 type EndpointObservation struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`

-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// (String)
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// (String)
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

-// (String) The ID of this resource.
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// (String) The ID of this resource.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`

-// (Map of String)
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// (Map of String)
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"`
 }

-
 type EndpointParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`

-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`

-// (Map of String)
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// (Map of String)
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"`
 }

-
 type ExcludedCollectionsInitParameters struct {
+	// (String)
+	CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`

-// (String)
-CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`
-
-// (String)
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// (String)
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }

-
 type ExcludedCollectionsObservation struct {
+	// (String)
+	CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`

-// (String)
-CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`
-
-// (String)
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// (String)
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }

-
 type ExcludedCollectionsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }

-
 type FieldsFieldsInitParameters struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type FieldsFieldsObservation struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type FieldsFieldsParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type FieldsInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-Fields []FieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []FieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type FieldsObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-Fields []FieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []FieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type FieldsParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Fields []FieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Fields []FieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type JSONParserDataSchemaFieldsFieldsInitParameters struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type JSONParserDataSchemaFieldsFieldsObservation struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type JSONParserDataSchemaFieldsFieldsParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type JSONParserDataSchemaFieldsInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-Fields []JSONParserDataSchemaFieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []JSONParserDataSchemaFieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type JSONParserDataSchemaFieldsObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-Fields []JSONParserDataSchemaFieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []JSONParserDataSchemaFieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type JSONParserDataSchemaFieldsParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Fields []JSONParserDataSchemaFieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Fields []JSONParserDataSchemaFieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
 }

-
 type JSONParserDataSchemaInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []JSONParserDataSchemaFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Fields []JSONParserDataSchemaFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"`
-
-// (String)
-JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
+	// (String)
+	JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
 }

-
 type JSONParserDataSchemaObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Fields []JSONParserDataSchemaFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Fields []JSONParserDataSchemaFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"`
-
-// (String)
-JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
+	// (String)
+	JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
 }

-
 type JSONParserDataSchemaParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Fields []JSONParserDataSchemaFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Fields []JSONParserDataSchemaFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"`
 }

-
 type JSONParserInitParameters struct {
+	// (Boolean)
+	AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`

-// (Boolean)
-AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-DataSchema []DataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	DataSchema []DataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`

-// (Boolean)
-NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
+	// (Boolean)
+	NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`

-// (Boolean)
-UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
+	// (Boolean)
+	UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
 }

-
 type JSONParserObservation struct {
+	// (Boolean)
+	AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`

-// (Boolean)
-AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-DataSchema []DataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	DataSchema []DataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`

-// (Boolean)
-NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
+	// (Boolean)
+	NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`

-// (Boolean)
-UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
+	// (Boolean)
+	UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
 }

-
 type JSONParserParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-DataSchema []DataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	DataSchema []DataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`

-// (Boolean)
-// +kubebuilder:validation:Optional
-UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
 }

-
 type KafkaSourceConnectionInitParameters struct {
+	// (String)
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`

-// (String)
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []ConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []ConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }

-
 type KafkaSourceConnectionObservation struct {
+	// (String)
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`

-// (String)
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []ConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []ConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }

-
 type KafkaSourceConnectionParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`

-// (String)
-// +kubebuilder:validation:Optional
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []ConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []ConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }

-
 type KafkaSourceInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Auth []AuthInitParameters `json:"auth,omitempty" tf:"auth,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Auth []AuthInitParameters `json:"auth,omitempty" tf:"auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Connection []KafkaSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []KafkaSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Parser []ParserInitParameters `json:"parser,omitempty" tf:"parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Parser []ParserInitParameters `json:"parser,omitempty" tf:"parser,omitempty"`

-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`

-// (String)
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// (String)
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`

-// (List of String)
-TopicNames []*string `json:"topicNames,omitempty" tf:"topic_names,omitempty"`
+	// (List of String)
+	TopicNames []*string `json:"topicNames,omitempty" tf:"topic_names,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Transformer []TransformerInitParameters `json:"transformer,omitempty" tf:"transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Transformer []TransformerInitParameters `json:"transformer,omitempty" tf:"transformer,omitempty"`
 }

-
 type KafkaSourceObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Auth []AuthObservation `json:"auth,omitempty" tf:"auth,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Auth []AuthObservation `json:"auth,omitempty" tf:"auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Connection []KafkaSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []KafkaSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Parser []ParserObservation `json:"parser,omitempty" tf:"parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Parser []ParserObservation `json:"parser,omitempty" tf:"parser,omitempty"`

-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`

-// (String)
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// (String)
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`

-// (List of String)
-TopicNames []*string `json:"topicNames,omitempty" tf:"topic_names,omitempty"`
+	// (List of String)
+	TopicNames []*string `json:"topicNames,omitempty" tf:"topic_names,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-Transformer []TransformerObservation `json:"transformer,omitempty" tf:"transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Transformer []TransformerObservation `json:"transformer,omitempty" tf:"transformer,omitempty"`
 }

-
 type KafkaSourceParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Auth []AuthParameters `json:"auth,omitempty" tf:"auth,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Auth []AuthParameters `json:"auth,omitempty" tf:"auth,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Connection []KafkaSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`

-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Connection []KafkaSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Parser []ParserParameters `json:"parser,omitempty" tf:"parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Parser []ParserParameters `json:"parser,omitempty" tf:"parser,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	TopicNames []*string `json:"topicNames,omitempty" tf:"topic_names,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-TopicNames []*string `json:"topicNames,omitempty" tf:"topic_names,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Transformer []TransformerParameters `json:"transformer,omitempty" tf:"transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Transformer []TransformerParameters `json:"transformer,omitempty" tf:"transformer,omitempty"`
 }
-
 type KafkaTargetAuthInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	NoAuth []AuthNoAuthInitParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-NoAuth []AuthNoAuthInitParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Sasl []AuthSaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Sasl []AuthSaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"`
 }
-
 type KafkaTargetAuthObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	NoAuth []AuthNoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-NoAuth []AuthNoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Sasl []AuthSaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Sasl []AuthSaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"`
 }
-
 type KafkaTargetAuthParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	NoAuth []AuthNoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-NoAuth []AuthNoAuthParameters `json:"noAuth,omitempty" tf:"no_auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Sasl []AuthSaslParameters `json:"sasl,omitempty" tf:"sasl,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Sasl []AuthSaslParameters `json:"sasl,omitempty" tf:"sasl,omitempty"`
 }
-
 type KafkaTargetConnectionInitParameters struct {
+	// (String)
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// (String)
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []KafkaTargetConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []KafkaTargetConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }
-
 type KafkaTargetConnectionObservation struct {
+	// (String)
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// (String)
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []KafkaTargetConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []KafkaTargetConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }
-
 type KafkaTargetConnectionOnPremiseInitParameters struct {
+	// (List of String)
+	BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
-// (List of String)
-BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []KafkaTargetConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []KafkaTargetConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type KafkaTargetConnectionOnPremiseObservation struct {
+	// (List of String)
+	BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
-// (List of String)
-BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []KafkaTargetConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []KafkaTargetConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type KafkaTargetConnectionOnPremiseParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-BrokerUrls []*string `json:"brokerUrls,omitempty" tf:"broker_urls,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TLSMode []KafkaTargetConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TLSMode []KafkaTargetConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type KafkaTargetConnectionOnPremiseTLSModeInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []ConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Disabled []ConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []ConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []ConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }
-
 type KafkaTargetConnectionOnPremiseTLSModeObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Disabled []ConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Disabled []ConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Enabled []ConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Enabled []ConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }
-
 type KafkaTargetConnectionOnPremiseTLSModeParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Disabled []ConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Disabled []ConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Enabled []ConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Enabled []ConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }
-
 type KafkaTargetConnectionParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []KafkaTargetConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []KafkaTargetConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }
-
 type KafkaTargetInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Auth []KafkaTargetAuthInitParameters `json:"auth,omitempty" tf:"auth,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Auth []KafkaTargetAuthInitParameters `json:"auth,omitempty" tf:"auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Connection []KafkaTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []KafkaTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Serializer []SerializerInitParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Serializer []SerializerInitParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-TopicSettings []TopicSettingsInitParameters `json:"topicSettings,omitempty" tf:"topic_settings,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TopicSettings []TopicSettingsInitParameters `json:"topicSettings,omitempty" tf:"topic_settings,omitempty"`
 }
-
 type KafkaTargetObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Auth []KafkaTargetAuthObservation `json:"auth,omitempty" tf:"auth,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Auth []KafkaTargetAuthObservation `json:"auth,omitempty" tf:"auth,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Connection []KafkaTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []KafkaTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Serializer []SerializerObservation `json:"serializer,omitempty" tf:"serializer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Serializer []SerializerObservation `json:"serializer,omitempty" tf:"serializer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-TopicSettings []TopicSettingsObservation `json:"topicSettings,omitempty" tf:"topic_settings,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TopicSettings []TopicSettingsObservation `json:"topicSettings,omitempty" tf:"topic_settings,omitempty"`
 }
-
 type KafkaTargetParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Auth []KafkaTargetAuthParameters `json:"auth,omitempty" tf:"auth,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Auth []KafkaTargetAuthParameters `json:"auth,omitempty" tf:"auth,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Connection []KafkaTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Connection []KafkaTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Serializer []SerializerParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Serializer []SerializerParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TopicSettings []TopicSettingsParameters `json:"topicSettings,omitempty" tf:"topic_settings,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TopicSettings []TopicSettingsParameters `json:"topicSettings,omitempty" tf:"topic_settings,omitempty"`
 }
-
 type MappingInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	ColumnValue []ColumnValueInitParameters `json:"columnValue,omitempty" tf:"column_value,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-ColumnValue []ColumnValueInitParameters `json:"columnValue,omitempty" tf:"column_value,omitempty"`
-
-// (String)
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// (String)
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
 }
-
 type MappingObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	ColumnValue []ColumnValueObservation `json:"columnValue,omitempty" tf:"column_value,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-ColumnValue []ColumnValueObservation `json:"columnValue,omitempty" tf:"column_value,omitempty"`
-
-// (String)
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// (String)
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
 }
-
 type MappingParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ColumnValue []ColumnValueParameters `json:"columnValue,omitempty" tf:"column_value,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ColumnValue []ColumnValueParameters `json:"columnValue,omitempty" tf:"column_value,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
 }
-
 type MetrikaSourceInitParameters struct {
+	// (List of Number)
+	CounterIds []*float64 `json:"counterIds,omitempty" tf:"counter_ids,omitempty"`
-// (List of Number)
-CounterIds []*float64 `json:"counterIds,omitempty" tf:"counter_ids,omitempty"`
-
-// (Block List) (see below for nested schema)
-Streams []StreamsInitParameters `json:"streams,omitempty" tf:"streams,omitempty"`
+	// (Block List) (see below for nested schema)
+	Streams []StreamsInitParameters `json:"streams,omitempty" tf:"streams,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Token []TokenInitParameters `json:"token,omitempty" tf:"token,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Token []TokenInitParameters `json:"token,omitempty" tf:"token,omitempty"`
 }
-
 type MetrikaSourceObservation struct {
+	// (List of Number)
+	CounterIds []*float64 `json:"counterIds,omitempty" tf:"counter_ids,omitempty"`
-// (List of Number)
-CounterIds []*float64 `json:"counterIds,omitempty" tf:"counter_ids,omitempty"`
+	// (Block List) (see below for nested schema)
+	Streams []StreamsObservation `json:"streams,omitempty" tf:"streams,omitempty"`
-// (Block List) (see below for nested schema)
-Streams []StreamsObservation `json:"streams,omitempty" tf:"streams,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Token []TokenParameters `json:"token,omitempty" tf:"token,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Token []TokenParameters `json:"token,omitempty" tf:"token,omitempty"`
 }
-
 type MetrikaSourceParameters struct {
+	// (List of Number)
+	// +kubebuilder:validation:Optional
+	CounterIds []*float64 `json:"counterIds,omitempty" tf:"counter_ids,omitempty"`
-// (List of Number)
-// +kubebuilder:validation:Optional
-CounterIds []*float64 `json:"counterIds,omitempty" tf:"counter_ids,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Streams []StreamsParameters `json:"streams,omitempty" tf:"streams,omitempty"`
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Streams []StreamsParameters `json:"streams,omitempty" tf:"streams,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Token []TokenParameters `json:"token,omitempty" tf:"token,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Token []TokenParameters `json:"token,omitempty" tf:"token,omitempty"`
 }
-
 type MongoSourceConnectionConnectionOptionsInitParameters struct {
+	// (String)
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
-// (String)
-AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// Reference to a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
-// Reference to a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
+	// Selector for a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-// Selector for a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []ConnectionConnectionOptionsOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []ConnectionConnectionOptionsOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []ConnectionConnectionOptionsPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []ConnectionConnectionOptionsPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type MongoSourceConnectionConnectionOptionsObservation struct {
+	// (String)
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
-// (String)
-AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []ConnectionConnectionOptionsOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []ConnectionConnectionOptionsOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []ConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []ConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type MongoSourceConnectionConnectionOptionsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
+	// +kubebuilder:validation:Optional
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
-// +kubebuilder:validation:Optional
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// Reference to a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
-// Reference to a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
+	// Selector for a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-// Selector for a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []ConnectionConnectionOptionsOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []ConnectionConnectionOptionsOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Password []ConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Password []ConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type MongoSourceConnectionInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-ConnectionOptions []MongoSourceConnectionConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ConnectionOptions []MongoSourceConnectionConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }
-
 type MongoSourceConnectionObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-ConnectionOptions []MongoSourceConnectionConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ConnectionOptions []MongoSourceConnectionConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }
-
 type MongoSourceConnectionParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ConnectionOptions []MongoSourceConnectionConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ConnectionOptions []MongoSourceConnectionConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }
-
 type MongoSourceInitParameters struct {
+	// (Block List) (see below for nested schema)
+	Collections []CollectionsInitParameters `json:"collections,omitempty" tf:"collections,omitempty"`
-// (Block List) (see below for nested schema)
-Collections []CollectionsInitParameters `json:"collections,omitempty" tf:"collections,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []MongoSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Connection []MongoSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List) (see below for nested schema)
+	ExcludedCollections []ExcludedCollectionsInitParameters `json:"excludedCollections,omitempty" tf:"excluded_collections,omitempty"`
-// (Block List) (see below for nested schema)
-ExcludedCollections []ExcludedCollectionsInitParameters `json:"excludedCollections,omitempty" tf:"excluded_collections,omitempty"`
+	// (Boolean)
+	SecondaryPreferredMode *bool `json:"secondaryPreferredMode,omitempty" tf:"secondary_preferred_mode,omitempty"`
-// (Boolean)
-SecondaryPreferredMode *bool `json:"secondaryPreferredMode,omitempty" tf:"secondary_preferred_mode,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
 }
-
 type MongoSourceObservation struct {
+	// (Block List) (see below for nested schema)
+	Collections []CollectionsObservation `json:"collections,omitempty" tf:"collections,omitempty"`
-// (Block List) (see below for nested schema)
-Collections []CollectionsObservation `json:"collections,omitempty" tf:"collections,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []MongoSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Connection []MongoSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List) (see below for nested schema)
+	ExcludedCollections []ExcludedCollectionsObservation `json:"excludedCollections,omitempty" tf:"excluded_collections,omitempty"`
-// (Block List) (see below for nested schema)
-ExcludedCollections []ExcludedCollectionsObservation `json:"excludedCollections,omitempty" tf:"excluded_collections,omitempty"`
+	// (Boolean)
+	SecondaryPreferredMode *bool `json:"secondaryPreferredMode,omitempty" tf:"secondary_preferred_mode,omitempty"`
-// (Boolean)
-SecondaryPreferredMode *bool `json:"secondaryPreferredMode,omitempty" tf:"secondary_preferred_mode,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type MongoSourceParameters struct {
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Collections []CollectionsParameters `json:"collections,omitempty" tf:"collections,omitempty"`
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Collections []CollectionsParameters `json:"collections,omitempty" tf:"collections,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Connection []MongoSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Connection []MongoSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ExcludedCollections []ExcludedCollectionsParameters `json:"excludedCollections,omitempty" tf:"excluded_collections,omitempty"`
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ExcludedCollections []ExcludedCollectionsParameters `json:"excludedCollections,omitempty" tf:"excluded_collections,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	SecondaryPreferredMode *bool `json:"secondaryPreferredMode,omitempty" tf:"secondary_preferred_mode,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-SecondaryPreferredMode *bool `json:"secondaryPreferredMode,omitempty" tf:"secondary_preferred_mode,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
 }
-
 type MongoTargetConnectionConnectionOptionsInitParameters struct {
+	// (String)
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
-// (String)
-AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// Reference to a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
-// Reference to a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
+	// Selector for a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-// Selector for a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []MongoTargetConnectionConnectionOptionsOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []MongoTargetConnectionConnectionOptionsOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []MongoTargetConnectionConnectionOptionsPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []MongoTargetConnectionConnectionOptionsPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type MongoTargetConnectionConnectionOptionsObservation struct {
+	// (String)
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
-// (String)
-AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []MongoTargetConnectionConnectionOptionsOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []MongoTargetConnectionConnectionOptionsOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []MongoTargetConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []MongoTargetConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type MongoTargetConnectionConnectionOptionsOnPremiseInitParameters struct {
+	// (List of String)
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-// (List of String)
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
+	// (Number)
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
-// (Number)
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (String)
+	ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
-// (String)
-ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []ConnectionConnectionOptionsOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []ConnectionConnectionOptionsOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type MongoTargetConnectionConnectionOptionsOnPremiseObservation struct {
+	// (List of String)
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-// (List of String)
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
+	// (Number)
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
-// (Number)
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (String)
+	ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
-// (String)
-ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []ConnectionConnectionOptionsOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []ConnectionConnectionOptionsOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type MongoTargetConnectionConnectionOptionsOnPremiseParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
-// (Number)
-// +kubebuilder:validation:Optional
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ReplicaSet *string `json:"replicaSet,omitempty" tf:"replica_set,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TLSMode []ConnectionConnectionOptionsOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TLSMode []ConnectionConnectionOptionsOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type MongoTargetConnectionConnectionOptionsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
+	// +kubebuilder:validation:Optional
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MongodbCluster
-// +kubebuilder:validation:Optional
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// Reference to a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
-// Reference to a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
+	// Selector for a MongodbCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-// Selector for a MongodbCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []MongoTargetConnectionConnectionOptionsOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []MongoTargetConnectionConnectionOptionsOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Password []MongoTargetConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Password []MongoTargetConnectionConnectionOptionsPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type MongoTargetConnectionConnectionOptionsPasswordInitParameters struct {
-
-// (String, Sensitive)
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type MongoTargetConnectionConnectionOptionsPasswordObservation struct {
-
 }
-
 type MongoTargetConnectionConnectionOptionsPasswordParameters struct {
-
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type MongoTargetConnectionInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-ConnectionOptions []MongoTargetConnectionConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ConnectionOptions []MongoTargetConnectionConnectionOptionsInitParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }
-
 type MongoTargetConnectionObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-ConnectionOptions []MongoTargetConnectionConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ConnectionOptions []MongoTargetConnectionConnectionOptionsObservation `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }
-
 type MongoTargetConnectionParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ConnectionOptions []MongoTargetConnectionConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ConnectionOptions []MongoTargetConnectionConnectionOptionsParameters `json:"connectionOptions,omitempty" tf:"connection_options,omitempty"`
 }
-
 type MongoTargetInitParameters struct {
+	// (String)
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []MongoTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Connection []MongoTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
 }
-
 type MongoTargetObservation struct {
+	// (String)
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []MongoTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Connection []MongoTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type MongoTargetParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Connection []MongoTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Connection []MongoTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
 }
-
 type MySQLSourceConnectionInitParameters struct {
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// Reference to a MySQLCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
-// Reference to a MySQLCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
+	// Selector for a MySQLCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-// Selector for a MySQLCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []MySQLSourceConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []MySQLSourceConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }
-
 type MySQLSourceConnectionObservation struct {
+	// (String)
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OnPremise []MySQLSourceConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OnPremise []MySQLSourceConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }
-
 type MySQLSourceConnectionOnPremiseInitParameters struct {
+	// (List of String)
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-// (List of String)
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
+	// (Number)
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
-// (Number)
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []MySQLSourceConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []MySQLSourceConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type MySQLSourceConnectionOnPremiseObservation struct {
+	// (List of String)
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-// (List of String)
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
+	// (Number)
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
-// (Number)
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TLSMode []MySQLSourceConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TLSMode []MySQLSourceConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"`
 }
-
 type MySQLSourceConnectionOnPremiseParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
-// (Number)
-// +kubebuilder:validation:Optional
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TLSMode []MySQLSourceConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TLSMode []MySQLSourceConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters struct { - } - type MySQLSourceConnectionOnPremiseTLSModeDisabledObservation struct { - } - type MySQLSourceConnectionOnPremiseTLSModeDisabledParameters struct { - } - type MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type MySQLSourceConnectionOnPremiseTLSModeEnabledObservation struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type MySQLSourceConnectionOnPremiseTLSModeEnabledParameters struct { - -// (String) -// +kubebuilder:validation:Optional -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type MySQLSourceConnectionOnPremiseTLSModeInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type MySQLSourceConnectionOnPremiseTLSModeObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []MySQLSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []MySQLSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []MySQLSourceConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []MySQLSourceConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type MySQLSourceConnectionOnPremiseTLSModeParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Disabled []MySQLSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// 
+kubebuilder:validation:Optional -Disabled []MySQLSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Enabled []MySQLSourceConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Enabled []MySQLSourceConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type MySQLSourceConnectionParameters struct { + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster + // +kubebuilder:validation:Optional + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster -// +kubebuilder:validation:Optional -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` + // Reference to a MySQLCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` -// Reference to a MySQLCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` + // Selector for a MySQLCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` -// Selector for a MySQLCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -OnPremise []MySQLSourceConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + OnPremise []MySQLSourceConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type MySQLSourceInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Connection []MySQLSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []MySQLSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Reference to a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` -// Reference to a MySQLDatabase in mdb to populate database. 
-// +kubebuilder:validation:Optional -DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` + // Selector for a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` -// Selector for a MySQLDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` + // (List of String) + ExcludeTablesRegex []*string `json:"excludeTablesRegex,omitempty" tf:"exclude_tables_regex,omitempty"` -// (List of String) -ExcludeTablesRegex []*string `json:"excludeTablesRegex,omitempty" tf:"exclude_tables_regex,omitempty"` + // (List of String) + IncludeTablesRegex []*string `json:"includeTablesRegex,omitempty" tf:"include_tables_regex,omitempty"` -// (List of String) -IncludeTablesRegex []*string `json:"includeTablesRegex,omitempty" tf:"include_tables_regex,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ObjectTransferSettings []ObjectTransferSettingsInitParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ObjectTransferSettings []ObjectTransferSettingsInitParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []MySQLSourcePasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Password []MySQLSourcePasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` + // (List of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // References to SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroups. -// +kubebuilder:validation:Optional -SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroups. 
-// +kubebuilder:validation:Optional -SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + // (String) + ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` -// (String) -ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` + // (String) + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// (String) -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + User *string `json:"user,omitempty" tf:"user,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -User *string `json:"user,omitempty" tf:"user,omitempty"` + // Reference to a MySQLUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` -// Reference to a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` - -// Selector for a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` + // Selector for a MySQLUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` } - type MySQLSourceObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Connection []MySQLSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []MySQLSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // (List of String) + ExcludeTablesRegex []*string `json:"excludeTablesRegex,omitempty" tf:"exclude_tables_regex,omitempty"` -// (List of String) -ExcludeTablesRegex []*string `json:"excludeTablesRegex,omitempty" tf:"exclude_tables_regex,omitempty"` + // (List of String) + IncludeTablesRegex []*string `json:"includeTablesRegex,omitempty" tf:"include_tables_regex,omitempty"` -// (List of String) -IncludeTablesRegex []*string `json:"includeTablesRegex,omitempty" tf:"include_tables_regex,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ObjectTransferSettings []ObjectTransferSettingsObservation `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ObjectTransferSettings []ObjectTransferSettingsObservation `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []MySQLSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Password []MySQLSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` 
-// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (String) + ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` -// (String) -ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` + // (String) + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// (String) -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` - -// (String) -User *string `json:"user,omitempty" tf:"user,omitempty"` + // (String) + User *string `json:"user,omitempty" tf:"user,omitempty"` } - type MySQLSourceParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Connection []MySQLSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Connection []MySQLSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + // +kubebuilder:validation:Optional + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -// +kubebuilder:validation:Optional -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Reference to a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` -// Reference to a MySQLDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` + // Selector for a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` -// Selector for a MySQLDatabase in mdb to populate database. 
-// +kubebuilder:validation:Optional -DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` + // (List of String) + // +kubebuilder:validation:Optional + ExcludeTablesRegex []*string `json:"excludeTablesRegex,omitempty" tf:"exclude_tables_regex,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -ExcludeTablesRegex []*string `json:"excludeTablesRegex,omitempty" tf:"exclude_tables_regex,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IncludeTablesRegex []*string `json:"includeTablesRegex,omitempty" tf:"include_tables_regex,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -IncludeTablesRegex []*string `json:"includeTablesRegex,omitempty" tf:"include_tables_regex,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + ObjectTransferSettings []ObjectTransferSettingsParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -ObjectTransferSettings []ObjectTransferSettingsParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Password []MySQLSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Password []MySQLSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (List of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // References to SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroups. -// +kubebuilder:validation:Optional -SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroups. 
-// +kubebuilder:validation:Optional -SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + // (String) + // +kubebuilder:validation:Optional + ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + // +kubebuilder:validation:Optional + User *string `json:"user,omitempty" tf:"user,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -// +kubebuilder:validation:Optional -User *string `json:"user,omitempty" tf:"user,omitempty"` + // Reference to a MySQLUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` -// Reference to a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` - -// Selector for a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` + // Selector for a MySQLUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` } - type MySQLSourcePasswordInitParameters struct { - -// (String, Sensitive) -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type MySQLSourcePasswordObservation struct { - } - type MySQLSourcePasswordParameters struct { - -// (String, Sensitive) -// +kubebuilder:validation:Optional -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + // +kubebuilder:validation:Optional + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type MySQLTargetConnectionInitParameters struct { + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` + // Reference to a MySQLCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` -// Reference to a MySQLCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` + // Selector for a MySQLCluster in mdb to populate mdbClusterId. 
+ // +kubebuilder:validation:Optional + MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` -// Selector for a MySQLCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -OnPremise []MySQLTargetConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + OnPremise []MySQLTargetConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type MySQLTargetConnectionObservation struct { + // (String) + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -OnPremise []MySQLTargetConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + OnPremise []MySQLTargetConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type MySQLTargetConnectionOnPremiseInitParameters struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []MySQLTargetConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []MySQLTargetConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type MySQLTargetConnectionOnPremiseObservation struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []MySQLTargetConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []MySQLTargetConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type MySQLTargetConnectionOnPremiseParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -// +kubebuilder:validation:Optional -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TLSMode []MySQLTargetConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TLSMode []MySQLTargetConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters struct { - } - type MySQLTargetConnectionOnPremiseTLSModeDisabledObservation struct { - } - type MySQLTargetConnectionOnPremiseTLSModeDisabledParameters struct { - } - type MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type MySQLTargetConnectionOnPremiseTLSModeEnabledObservation struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type MySQLTargetConnectionOnPremiseTLSModeEnabledParameters struct { - -// (String) -// +kubebuilder:validation:Optional -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type MySQLTargetConnectionOnPremiseTLSModeInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type MySQLTargetConnectionOnPremiseTLSModeObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []MySQLTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []MySQLTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []MySQLTargetConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []MySQLTargetConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type MySQLTargetConnectionOnPremiseTLSModeParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Disabled []MySQLTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// 
+kubebuilder:validation:Optional -Disabled []MySQLTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Enabled []MySQLTargetConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Enabled []MySQLTargetConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type MySQLTargetConnectionParameters struct { + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster + // +kubebuilder:validation:Optional + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLCluster -// +kubebuilder:validation:Optional -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` + // Reference to a MySQLCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` -// Reference to a MySQLCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` + // Selector for a MySQLCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` -// Selector for a MySQLCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -OnPremise []MySQLTargetConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + OnPremise []MySQLTargetConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type MySQLTargetInitParameters struct { + // (String) + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// (String) -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Connection []MySQLTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []MySQLTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Reference to a MySQLDatabase in mdb to populate database. 
+ // +kubebuilder:validation:Optional + DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` -// Reference to a MySQLDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` + // Selector for a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` -// Selector for a MySQLDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` + // (Block List, Max: 1) (see below for nested schema) + Password []MySQLTargetPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Password []MySQLTargetPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` + // (String) + SQLMode *string `json:"sqlMode,omitempty" tf:"sql_mode,omitempty"` -// (String) -SQLMode *string `json:"sqlMode,omitempty" tf:"sql_mode,omitempty"` + // (List of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // References to SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroups. -// +kubebuilder:validation:Optional -SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroups. 
-// +kubebuilder:validation:Optional -SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + // (String) + ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` -// (String) -ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` + // (Boolean) + SkipConstraintChecks *bool `json:"skipConstraintChecks,omitempty" tf:"skip_constraint_checks,omitempty"` -// (Boolean) -SkipConstraintChecks *bool `json:"skipConstraintChecks,omitempty" tf:"skip_constraint_checks,omitempty"` + // (String) + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// (String) -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + User *string `json:"user,omitempty" tf:"user,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -User *string `json:"user,omitempty" tf:"user,omitempty"` + // Reference to a MySQLUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` -// Reference to a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` - -// Selector for a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` + // Selector for a MySQLUser in mdb to populate user. 
+ // +kubebuilder:validation:Optional + UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` } - type MySQLTargetObservation struct { + // (String) + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// (String) -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Connection []MySQLTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []MySQLTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []MySQLTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Password []MySQLTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (String) + SQLMode *string `json:"sqlMode,omitempty" tf:"sql_mode,omitempty"` -// (String) -SQLMode *string `json:"sqlMode,omitempty" tf:"sql_mode,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (String) + ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` -// (String) -ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` + // (Boolean) + SkipConstraintChecks *bool `json:"skipConstraintChecks,omitempty" tf:"skip_constraint_checks,omitempty"` -// (Boolean) -SkipConstraintChecks *bool `json:"skipConstraintChecks,omitempty" tf:"skip_constraint_checks,omitempty"` + // (String) + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// (String) -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` - -// (String) -User *string `json:"user,omitempty" tf:"user,omitempty"` + // (String) + User *string `json:"user,omitempty" tf:"user,omitempty"` } - type MySQLTargetParameters struct { + // (String) + // +kubebuilder:validation:Optional + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// (String) -// +kubebuilder:validation:Optional -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Connection []MySQLTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Connection []MySQLTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + // +kubebuilder:validation:Optional + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase -// 
+crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -// +kubebuilder:validation:Optional -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Reference to a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` -// Reference to a MySQLDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` + // Selector for a MySQLDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` -// Selector for a MySQLDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Password []MySQLTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Password []MySQLTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (String) + // +kubebuilder:validation:Optional + SQLMode *string `json:"sqlMode,omitempty" tf:"sql_mode,omitempty"` -// (String) -// +kubebuilder:validation:Optional -SQLMode *string `json:"sqlMode,omitempty" tf:"sql_mode,omitempty"` + // (List of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // References to SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroups. -// +kubebuilder:validation:Optional -SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroups. 
-// +kubebuilder:validation:Optional -SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + // (String) + // +kubebuilder:validation:Optional + ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ServiceDatabase *string `json:"serviceDatabase,omitempty" tf:"service_database,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + SkipConstraintChecks *bool `json:"skipConstraintChecks,omitempty" tf:"skip_constraint_checks,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -SkipConstraintChecks *bool `json:"skipConstraintChecks,omitempty" tf:"skip_constraint_checks,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + // +kubebuilder:validation:Optional + User *string `json:"user,omitempty" tf:"user,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLUser -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -// +kubebuilder:validation:Optional -User *string `json:"user,omitempty" tf:"user,omitempty"` + // Reference to a MySQLUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` -// Reference to a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` - -// Selector for a MySQLUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` + // Selector for a MySQLUser in mdb to populate user. 
+ // +kubebuilder:validation:Optional + UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` } - type MySQLTargetPasswordInitParameters struct { - -// (String, Sensitive) -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type MySQLTargetPasswordObservation struct { - } - type MySQLTargetPasswordParameters struct { - -// (String, Sensitive) -// +kubebuilder:validation:Optional -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + // +kubebuilder:validation:Optional + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type NoAuthInitParameters struct { - } - type NoAuthObservation struct { - } - type NoAuthParameters struct { - } - type ObjectTransferSettingsInitParameters struct { + // (String) + Routine *string `json:"routine,omitempty" tf:"routine,omitempty"` -// (String) -Routine *string `json:"routine,omitempty" tf:"routine,omitempty"` + // (String) + Tables *string `json:"tables,omitempty" tf:"tables,omitempty"` -// (String) -Tables *string `json:"tables,omitempty" tf:"tables,omitempty"` + // (String) + Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` -// (String) -Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` - -// (String) -View *string `json:"view,omitempty" tf:"view,omitempty"` + // (String) + View *string `json:"view,omitempty" tf:"view,omitempty"` } - type ObjectTransferSettingsObservation struct { + // (String) + Routine *string `json:"routine,omitempty" tf:"routine,omitempty"` -// (String) -Routine *string `json:"routine,omitempty" tf:"routine,omitempty"` + // (String) + Tables *string `json:"tables,omitempty" tf:"tables,omitempty"` -// (String) -Tables *string `json:"tables,omitempty" tf:"tables,omitempty"` + // (String) + Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` -// (String) -Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` - -// (String) -View *string `json:"view,omitempty" tf:"view,omitempty"` + // (String) + View *string `json:"view,omitempty" tf:"view,omitempty"` } - type ObjectTransferSettingsParameters struct { + // (String) + // +kubebuilder:validation:Optional + Routine *string `json:"routine,omitempty" tf:"routine,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Routine *string `json:"routine,omitempty" tf:"routine,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Tables *string `json:"tables,omitempty" tf:"tables,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Tables *string `json:"tables,omitempty" tf:"tables,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -View *string `json:"view,omitempty" tf:"view,omitempty"` + // (String) + // +kubebuilder:validation:Optional + View *string `json:"view,omitempty" tf:"view,omitempty"` } - type OnPremiseInitParameters struct { + // (Number) + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` -// (Number) -HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + // (Number) + NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"` -// (Number) -NativePort *float64 
`json:"nativePort,omitempty" tf:"native_port,omitempty"` + // (Block List) (see below for nested schema) + Shards []ShardsInitParameters `json:"shards,omitempty" tf:"shards,omitempty"` -// (Block List) (see below for nested schema) -Shards []ShardsInitParameters `json:"shards,omitempty" tf:"shards,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []TLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []TLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type OnPremiseObservation struct { + // (Number) + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` -// (Number) -HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + // (Number) + NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"` -// (Number) -NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"` + // (Block List) (see below for nested schema) + Shards []ShardsObservation `json:"shards,omitempty" tf:"shards,omitempty"` -// (Block List) (see below for nested schema) -Shards []ShardsObservation `json:"shards,omitempty" tf:"shards,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []TLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []TLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type OnPremiseParameters struct { + // (Number) + // +kubebuilder:validation:Optional + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` -// (Number) -// +kubebuilder:validation:Optional -HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + // (Number) + // +kubebuilder:validation:Optional + NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"` -// (Number) -// +kubebuilder:validation:Optional -NativePort *float64 `json:"nativePort,omitempty" tf:"native_port,omitempty"` + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + Shards []ShardsParameters `json:"shards,omitempty" tf:"shards,omitempty"` -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -Shards []ShardsParameters `json:"shards,omitempty" tf:"shards,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TLSMode []TLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TLSMode []TLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type OnPremiseShardsInitParameters struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` - -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type OnPremiseShardsObservation struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` - -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type OnPremiseShardsParameters struct { + // (List of String) + // +kubebuilder:validation:Optional 
+ Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type OnPremiseTLSModeDisabledInitParameters struct { - } - type OnPremiseTLSModeDisabledObservation struct { - } - type OnPremiseTLSModeDisabledParameters struct { - } - type OnPremiseTLSModeEnabledInitParameters struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type OnPremiseTLSModeEnabledObservation struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type OnPremiseTLSModeEnabledParameters struct { - -// (String) -// +kubebuilder:validation:Optional -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type OnPremiseTLSModeInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []TLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []TLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []TLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []TLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type OnPremiseTLSModeObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []TLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []TLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []TLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []TLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type OnPremiseTLSModeParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Disabled []TLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Disabled []TLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Enabled []TLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Enabled []TLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type ParserAuditTrailsV1ParserInitParameters struct { - } - type 
ParserAuditTrailsV1ParserObservation struct { - } - type ParserAuditTrailsV1ParserParameters struct { - } - type ParserCloudLoggingParserInitParameters struct { - } - type ParserCloudLoggingParserObservation struct { - } - type ParserCloudLoggingParserParameters struct { - } - type ParserInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + AuditTrailsV1Parser []AuditTrailsV1ParserInitParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -AuditTrailsV1Parser []AuditTrailsV1ParserInitParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + CloudLoggingParser []CloudLoggingParserInitParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -CloudLoggingParser []CloudLoggingParserInitParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + JSONParser []JSONParserInitParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -JSONParser []JSONParserInitParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TskvParser []TskvParserInitParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TskvParser []TskvParserInitParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"` } - type ParserJSONParserInitParameters struct { + // (Boolean) + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + DataSchema []JSONParserDataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -DataSchema []JSONParserDataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` + // (Boolean) + NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` -// (Boolean) -NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` - -// (Boolean) -UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` + // (Boolean) + UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` } - type ParserJSONParserObservation struct { + // (Boolean) + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + DataSchema []JSONParserDataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -DataSchema []JSONParserDataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` + // (Boolean) + NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` -// (Boolean) -NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` - -// (Boolean) -UnescapeStringValues *bool 
`json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` + // (Boolean) + UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` } - type ParserJSONParserParameters struct { + // (Boolean) + // +kubebuilder:validation:Optional + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + DataSchema []JSONParserDataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -DataSchema []JSONParserDataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` - -// (Boolean) -// +kubebuilder:validation:Optional -UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` } - type ParserObservation struct { + // (Block List, Max: 1) (see below for nested schema) + AuditTrailsV1Parser []AuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -AuditTrailsV1Parser []AuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + CloudLoggingParser []CloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -CloudLoggingParser []CloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + JSONParser []JSONParserObservation `json:"jsonParser,omitempty" tf:"json_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -JSONParser []JSONParserObservation `json:"jsonParser,omitempty" tf:"json_parser,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TskvParser []TskvParserObservation `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TskvParser []TskvParserObservation `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"` } - type ParserParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + AuditTrailsV1Parser []AuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -AuditTrailsV1Parser []AuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + CloudLoggingParser []CloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" 
tf:"cloud_logging_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -CloudLoggingParser []CloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + JSONParser []JSONParserParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -JSONParser []JSONParserParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TskvParser []TskvParserParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TskvParser []TskvParserParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"` } - type ParserTskvParserDataSchemaInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Fields []TskvParserDataSchemaFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Fields []TskvParserDataSchemaFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"` - -// (String) -JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` + // (String) + JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` } - type ParserTskvParserDataSchemaObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Fields []TskvParserDataSchemaFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Fields []TskvParserDataSchemaFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"` - -// (String) -JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` + // (String) + JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` } - type ParserTskvParserDataSchemaParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Fields []TskvParserDataSchemaFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Fields []TskvParserDataSchemaFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` + // (String) + // +kubebuilder:validation:Optional + JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` } - type ParserTskvParserInitParameters struct { + // (Boolean) + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + DataSchema []ParserTskvParserDataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -DataSchema []ParserTskvParserDataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` + // (Boolean) + NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` -// (Boolean) -NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" 
tf:"null_keys_allowed,omitempty"` - -// (Boolean) -UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` + // (Boolean) + UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` } - type ParserTskvParserObservation struct { + // (Boolean) + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + DataSchema []ParserTskvParserDataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -DataSchema []ParserTskvParserDataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` + // (Boolean) + NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` -// (Boolean) -NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` - -// (Boolean) -UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` + // (Boolean) + UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` } - type ParserTskvParserParameters struct { + // (Boolean) + // +kubebuilder:validation:Optional + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + DataSchema []ParserTskvParserDataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -DataSchema []ParserTskvParserDataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"` - -// (Boolean) -// +kubebuilder:validation:Optional -UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"` } - type PasswordInitParameters struct { - -// (String, Sensitive) -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type PasswordObservation struct { - } - type PasswordParameters struct { - -// (String, Sensitive) -// +kubebuilder:validation:Optional -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + // +kubebuilder:validation:Optional + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type PostgresSourceConnectionInitParameters struct { + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -// 
+crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` + // Reference to a PostgresqlCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` -// Reference to a PostgresqlCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` + // Selector for a PostgresqlCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` -// Selector for a PostgresqlCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -OnPremise []PostgresSourceConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + OnPremise []PostgresSourceConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type PostgresSourceConnectionObservation struct { + // (String) + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -OnPremise []PostgresSourceConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + OnPremise []PostgresSourceConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type PostgresSourceConnectionOnPremiseInitParameters struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []PostgresSourceConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []PostgresSourceConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type PostgresSourceConnectionOnPremiseObservation struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []PostgresSourceConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []PostgresSourceConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type PostgresSourceConnectionOnPremiseParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -// +kubebuilder:validation:Optional -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TLSMode []PostgresSourceConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TLSMode []PostgresSourceConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters struct { - } - type PostgresSourceConnectionOnPremiseTLSModeDisabledObservation struct { - } - type PostgresSourceConnectionOnPremiseTLSModeDisabledParameters struct { - } - type PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type PostgresSourceConnectionOnPremiseTLSModeEnabledObservation struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type PostgresSourceConnectionOnPremiseTLSModeEnabledParameters struct { - -// (String) -// +kubebuilder:validation:Optional -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type PostgresSourceConnectionOnPremiseTLSModeInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type PostgresSourceConnectionOnPremiseTLSModeObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []PostgresSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []PostgresSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []PostgresSourceConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []PostgresSourceConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type PostgresSourceConnectionOnPremiseTLSModeParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Disabled []PostgresSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// 
(Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Disabled []PostgresSourceConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Enabled []PostgresSourceConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Enabled []PostgresSourceConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type PostgresSourceConnectionParameters struct { + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster + // +kubebuilder:validation:Optional + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster -// +kubebuilder:validation:Optional -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` + // Reference to a PostgresqlCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` -// Reference to a PostgresqlCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` + // Selector for a PostgresqlCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` -// Selector for a PostgresqlCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -OnPremise []PostgresSourceConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + OnPremise []PostgresSourceConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type PostgresSourceInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Connection []PostgresSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []PostgresSourceConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Reference to a PostgresqlDatabase in mdb to populate database. 
+ // +kubebuilder:validation:Optional + DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` -// Reference to a PostgresqlDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` + // Selector for a PostgresqlDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` -// Selector for a PostgresqlDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` + // (List of String) + ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` -// (List of String) -ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` + // (List of String) + IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` -// (List of String) -IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ObjectTransferSettings []PostgresSourceObjectTransferSettingsInitParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ObjectTransferSettings []PostgresSourceObjectTransferSettingsInitParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []PostgresSourcePasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Password []PostgresSourcePasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"` + // (List of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // References to SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroups. -// +kubebuilder:validation:Optional -SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroups. 
-// +kubebuilder:validation:Optional -SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + // (String) + ServiceSchema *string `json:"serviceSchema,omitempty" tf:"service_schema,omitempty"` -// (String) -ServiceSchema *string `json:"serviceSchema,omitempty" tf:"service_schema,omitempty"` + // (Number) + SlotGigabyteLagLimit *float64 `json:"slotGigabyteLagLimit,omitempty" tf:"slot_gigabyte_lag_limit,omitempty"` -// (Number) -SlotGigabyteLagLimit *float64 `json:"slotGigabyteLagLimit,omitempty" tf:"slot_gigabyte_lag_limit,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + User *string `json:"user,omitempty" tf:"user,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -User *string `json:"user,omitempty" tf:"user,omitempty"` + // Reference to a PostgresqlUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` -// Reference to a PostgresqlUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` - -// Selector for a PostgresqlUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` + // Selector for a PostgresqlUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` } - type PostgresSourceObjectTransferSettingsInitParameters struct { + // (String) + Cast *string `json:"cast,omitempty" tf:"cast,omitempty"` -// (String) -Cast *string `json:"cast,omitempty" tf:"cast,omitempty"` + // (String) + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` -// (String) -Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + // (String) + Constraint *string `json:"constraint,omitempty" tf:"constraint,omitempty"` -// (String) -Constraint *string `json:"constraint,omitempty" tf:"constraint,omitempty"` + // (String) + DefaultValues *string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// (String) -DefaultValues *string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // (String) + FkConstraint *string `json:"fkConstraint,omitempty" tf:"fk_constraint,omitempty"` -// (String) -FkConstraint *string `json:"fkConstraint,omitempty" tf:"fk_constraint,omitempty"` + // (String) + Function *string `json:"function,omitempty" tf:"function,omitempty"` -// (String) -Function *string `json:"function,omitempty" tf:"function,omitempty"` + // (String) + Index *string `json:"index,omitempty" tf:"index,omitempty"` -// (String) -Index *string `json:"index,omitempty" tf:"index,omitempty"` + // (String) + MaterializedView *string `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` -// (String) -MaterializedView *string `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` + // (String) + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` -// (String) -Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + // (String) + PrimaryKey *string `json:"primaryKey,omitempty" 
tf:"primary_key,omitempty"` -// (String) -PrimaryKey *string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` + // (String) + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` -// (String) -Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` + // (String) + Sequence *string `json:"sequence,omitempty" tf:"sequence,omitempty"` -// (String) -Sequence *string `json:"sequence,omitempty" tf:"sequence,omitempty"` + // (String) + SequenceOwnedBy *string `json:"sequenceOwnedBy,omitempty" tf:"sequence_owned_by,omitempty"` -// (String) -SequenceOwnedBy *string `json:"sequenceOwnedBy,omitempty" tf:"sequence_owned_by,omitempty"` + // (String) + SequenceSet *string `json:"sequenceSet,omitempty" tf:"sequence_set,omitempty"` -// (String) -SequenceSet *string `json:"sequenceSet,omitempty" tf:"sequence_set,omitempty"` + // (String) + Table *string `json:"table,omitempty" tf:"table,omitempty"` -// (String) -Table *string `json:"table,omitempty" tf:"table,omitempty"` + // (String) + Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` -// (String) -Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` + // (String) + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// (String) -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// (String) -View *string `json:"view,omitempty" tf:"view,omitempty"` + // (String) + View *string `json:"view,omitempty" tf:"view,omitempty"` } - type PostgresSourceObjectTransferSettingsObservation struct { + // (String) + Cast *string `json:"cast,omitempty" tf:"cast,omitempty"` -// (String) -Cast *string `json:"cast,omitempty" tf:"cast,omitempty"` + // (String) + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` -// (String) -Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + // (String) + Constraint *string `json:"constraint,omitempty" tf:"constraint,omitempty"` -// (String) -Constraint *string `json:"constraint,omitempty" tf:"constraint,omitempty"` + // (String) + DefaultValues *string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// (String) -DefaultValues *string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // (String) + FkConstraint *string `json:"fkConstraint,omitempty" tf:"fk_constraint,omitempty"` -// (String) -FkConstraint *string `json:"fkConstraint,omitempty" tf:"fk_constraint,omitempty"` + // (String) + Function *string `json:"function,omitempty" tf:"function,omitempty"` -// (String) -Function *string `json:"function,omitempty" tf:"function,omitempty"` + // (String) + Index *string `json:"index,omitempty" tf:"index,omitempty"` -// (String) -Index *string `json:"index,omitempty" tf:"index,omitempty"` + // (String) + MaterializedView *string `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` -// (String) -MaterializedView *string `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` + // (String) + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` -// (String) -Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + // (String) + PrimaryKey *string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` -// (String) -PrimaryKey *string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` + // (String) + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` -// (String) -Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` + // (String) + Sequence *string `json:"sequence,omitempty" tf:"sequence,omitempty"` -// (String) -Sequence *string `json:"sequence,omitempty" 
tf:"sequence,omitempty"` + // (String) + SequenceOwnedBy *string `json:"sequenceOwnedBy,omitempty" tf:"sequence_owned_by,omitempty"` -// (String) -SequenceOwnedBy *string `json:"sequenceOwnedBy,omitempty" tf:"sequence_owned_by,omitempty"` + // (String) + SequenceSet *string `json:"sequenceSet,omitempty" tf:"sequence_set,omitempty"` -// (String) -SequenceSet *string `json:"sequenceSet,omitempty" tf:"sequence_set,omitempty"` + // (String) + Table *string `json:"table,omitempty" tf:"table,omitempty"` -// (String) -Table *string `json:"table,omitempty" tf:"table,omitempty"` + // (String) + Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` -// (String) -Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` + // (String) + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// (String) -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// (String) -View *string `json:"view,omitempty" tf:"view,omitempty"` + // (String) + View *string `json:"view,omitempty" tf:"view,omitempty"` } - type PostgresSourceObjectTransferSettingsParameters struct { + // (String) + // +kubebuilder:validation:Optional + Cast *string `json:"cast,omitempty" tf:"cast,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Cast *string `json:"cast,omitempty" tf:"cast,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Constraint *string `json:"constraint,omitempty" tf:"constraint,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Constraint *string `json:"constraint,omitempty" tf:"constraint,omitempty"` + // (String) + // +kubebuilder:validation:Optional + DefaultValues *string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// (String) -// +kubebuilder:validation:Optional -DefaultValues *string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // (String) + // +kubebuilder:validation:Optional + FkConstraint *string `json:"fkConstraint,omitempty" tf:"fk_constraint,omitempty"` -// (String) -// +kubebuilder:validation:Optional -FkConstraint *string `json:"fkConstraint,omitempty" tf:"fk_constraint,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Function *string `json:"function,omitempty" tf:"function,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Function *string `json:"function,omitempty" tf:"function,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Index *string `json:"index,omitempty" tf:"index,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Index *string `json:"index,omitempty" tf:"index,omitempty"` + // (String) + // +kubebuilder:validation:Optional + MaterializedView *string `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` -// (String) -// +kubebuilder:validation:Optional -MaterializedView *string `json:"materializedView,omitempty" tf:"materialized_view,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrimaryKey *string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PrimaryKey *string 
`json:"primaryKey,omitempty" tf:"primary_key,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Sequence *string `json:"sequence,omitempty" tf:"sequence,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Sequence *string `json:"sequence,omitempty" tf:"sequence,omitempty"` + // (String) + // +kubebuilder:validation:Optional + SequenceOwnedBy *string `json:"sequenceOwnedBy,omitempty" tf:"sequence_owned_by,omitempty"` -// (String) -// +kubebuilder:validation:Optional -SequenceOwnedBy *string `json:"sequenceOwnedBy,omitempty" tf:"sequence_owned_by,omitempty"` + // (String) + // +kubebuilder:validation:Optional + SequenceSet *string `json:"sequenceSet,omitempty" tf:"sequence_set,omitempty"` -// (String) -// +kubebuilder:validation:Optional -SequenceSet *string `json:"sequenceSet,omitempty" tf:"sequence_set,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Table *string `json:"table,omitempty" tf:"table,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Table *string `json:"table,omitempty" tf:"table,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Trigger *string `json:"trigger,omitempty" tf:"trigger,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -View *string `json:"view,omitempty" tf:"view,omitempty"` + // (String) + // +kubebuilder:validation:Optional + View *string `json:"view,omitempty" tf:"view,omitempty"` } - type PostgresSourceObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Connection []PostgresSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Connection []PostgresSourceConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // (List of String) + ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` -// (List of String) -ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` + // (List of String) + IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` -// (List of String) -IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ObjectTransferSettings []PostgresSourceObjectTransferSettingsObservation `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ObjectTransferSettings []PostgresSourceObjectTransferSettingsObservation `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Password []PostgresSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) 
(see below for nested schema) -Password []PostgresSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (List of String) + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // (String) + ServiceSchema *string `json:"serviceSchema,omitempty" tf:"service_schema,omitempty"` -// (String) -ServiceSchema *string `json:"serviceSchema,omitempty" tf:"service_schema,omitempty"` + // (Number) + SlotGigabyteLagLimit *float64 `json:"slotGigabyteLagLimit,omitempty" tf:"slot_gigabyte_lag_limit,omitempty"` -// (Number) -SlotGigabyteLagLimit *float64 `json:"slotGigabyteLagLimit,omitempty" tf:"slot_gigabyte_lag_limit,omitempty"` - -// (String) -User *string `json:"user,omitempty" tf:"user,omitempty"` + // (String) + User *string `json:"user,omitempty" tf:"user,omitempty"` } - type PostgresSourceParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Connection []PostgresSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Connection []PostgresSourceConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + // +kubebuilder:validation:Optional + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -// +kubebuilder:validation:Optional -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Reference to a PostgresqlDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` -// Reference to a PostgresqlDatabase in mdb to populate database. -// +kubebuilder:validation:Optional -DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"` + // Selector for a PostgresqlDatabase in mdb to populate database. + // +kubebuilder:validation:Optional + DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` -// Selector for a PostgresqlDatabase in mdb to populate database. 
-// +kubebuilder:validation:Optional -DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"` + // (List of String) + // +kubebuilder:validation:Optional + ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + ObjectTransferSettings []PostgresSourceObjectTransferSettingsParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -ObjectTransferSettings []PostgresSourceObjectTransferSettingsParameters `json:"objectTransferSettings,omitempty" tf:"object_transfer_settings,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Password []PostgresSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Password []PostgresSourcePasswordParameters `json:"password,omitempty" tf:"password,omitempty"` + // (List of String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` -// (List of String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + // References to SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` -// References to SecurityGroup in vpc to populate securityGroups. -// +kubebuilder:validation:Optional -SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroups. 
-// +kubebuilder:validation:Optional -SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + // (String) + // +kubebuilder:validation:Optional + ServiceSchema *string `json:"serviceSchema,omitempty" tf:"service_schema,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ServiceSchema *string `json:"serviceSchema,omitempty" tf:"service_schema,omitempty"` + // (Number) + // +kubebuilder:validation:Optional + SlotGigabyteLagLimit *float64 `json:"slotGigabyteLagLimit,omitempty" tf:"slot_gigabyte_lag_limit,omitempty"` -// (Number) -// +kubebuilder:validation:Optional -SlotGigabyteLagLimit *float64 `json:"slotGigabyteLagLimit,omitempty" tf:"slot_gigabyte_lag_limit,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() + // +kubebuilder:validation:Optional + User *string `json:"user,omitempty" tf:"user,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser -// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName() -// +kubebuilder:validation:Optional -User *string `json:"user,omitempty" tf:"user,omitempty"` + // Reference to a PostgresqlUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` -// Reference to a PostgresqlUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"` - -// Selector for a PostgresqlUser in mdb to populate user. -// +kubebuilder:validation:Optional -UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` + // Selector for a PostgresqlUser in mdb to populate user. + // +kubebuilder:validation:Optional + UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"` } - type PostgresSourcePasswordInitParameters struct { - -// (String, Sensitive) -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type PostgresSourcePasswordObservation struct { - } - type PostgresSourcePasswordParameters struct { - -// (String, Sensitive) -// +kubebuilder:validation:Optional -RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` + // (String, Sensitive) + // +kubebuilder:validation:Optional + RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"` } - type PostgresTargetConnectionInitParameters struct { + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` + // Reference to a PostgresqlCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` -// Reference to a PostgresqlCluster in mdb to populate mdbClusterId. 
-// +kubebuilder:validation:Optional -MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"` + // Selector for a PostgresqlCluster in mdb to populate mdbClusterId. + // +kubebuilder:validation:Optional + MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` -// Selector for a PostgresqlCluster in mdb to populate mdbClusterId. -// +kubebuilder:validation:Optional -MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -OnPremise []PostgresTargetConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + OnPremise []PostgresTargetConnectionOnPremiseInitParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type PostgresTargetConnectionObservation struct { + // (String) + MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` -// (String) -MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -OnPremise []PostgresTargetConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + OnPremise []PostgresTargetConnectionOnPremiseObservation `json:"onPremise,omitempty" tf:"on_premise,omitempty"` } - type PostgresTargetConnectionOnPremiseInitParameters struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []PostgresTargetConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []PostgresTargetConnectionOnPremiseTLSModeInitParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type PostgresTargetConnectionOnPremiseObservation struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TLSMode []PostgresTargetConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TLSMode []PostgresTargetConnectionOnPremiseTLSModeObservation `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type PostgresTargetConnectionOnPremiseParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Number) + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// (Number) -// +kubebuilder:validation:Optional -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TLSMode []PostgresTargetConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TLSMode []PostgresTargetConnectionOnPremiseTLSModeParameters `json:"tlsMode,omitempty" tf:"tls_mode,omitempty"` } - type PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters struct { - } - type PostgresTargetConnectionOnPremiseTLSModeDisabledObservation struct { - } - type PostgresTargetConnectionOnPremiseTLSModeDisabledParameters struct { - } - type PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type PostgresTargetConnectionOnPremiseTLSModeEnabledObservation struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type PostgresTargetConnectionOnPremiseTLSModeEnabledParameters struct { - -// (String) -// +kubebuilder:validation:Optional -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type PostgresTargetConnectionOnPremiseTLSModeInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type PostgresTargetConnectionOnPremiseTLSModeObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []PostgresTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []PostgresTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []PostgresTargetConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []PostgresTargetConnectionOnPremiseTLSModeEnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type PostgresTargetConnectionOnPremiseTLSModeParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Disabled []PostgresTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// 
-// +kubebuilder:validation:Optional
-Disabled []PostgresTargetConnectionOnPremiseTLSModeDisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Enabled []PostgresTargetConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Enabled []PostgresTargetConnectionOnPremiseTLSModeEnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"`
 }
-
 type PostgresTargetConnectionParameters struct {
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster
+	// +kubebuilder:validation:Optional
+	MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlCluster
-// +kubebuilder:validation:Optional
-MdbClusterID *string `json:"mdbClusterId,omitempty" tf:"mdb_cluster_id,omitempty"`
+	// Reference to a PostgresqlCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
-// Reference to a PostgresqlCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDRef *v1.Reference `json:"mdbClusterIdRef,omitempty" tf:"-"`
+	// Selector for a PostgresqlCluster in mdb to populate mdbClusterId.
+	// +kubebuilder:validation:Optional
+	MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-// Selector for a PostgresqlCluster in mdb to populate mdbClusterId.
-// +kubebuilder:validation:Optional
-MdbClusterIDSelector *v1.Selector `json:"mdbClusterIdSelector,omitempty" tf:"-"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OnPremise []PostgresTargetConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OnPremise []PostgresTargetConnectionOnPremiseParameters `json:"onPremise,omitempty" tf:"on_premise,omitempty"`
 }
-
 type PostgresTargetInitParameters struct {
+	// (String)
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []PostgresTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Connection []PostgresTargetConnectionInitParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// Reference to a PostgresqlDatabase in mdb to populate database.
+	// +kubebuilder:validation:Optional
+	DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"`
-// Reference to a PostgresqlDatabase in mdb to populate database.
-// +kubebuilder:validation:Optional
-DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"`
+	// Selector for a PostgresqlDatabase in mdb to populate database.
+	// +kubebuilder:validation:Optional
+	DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"`
-// Selector for a PostgresqlDatabase in mdb to populate database.
-// +kubebuilder:validation:Optional
-DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []PostgresTargetPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []PostgresTargetPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (List of String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// References to SecurityGroup in vpc to populate securityGroups.
+	// +kubebuilder:validation:Optional
+	SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"`
-// References to SecurityGroup in vpc to populate securityGroups.
-// +kubebuilder:validation:Optional
-SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"`
+	// Selector for a list of SecurityGroup in vpc to populate securityGroups.
+	// +kubebuilder:validation:Optional
+	SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"`
-// Selector for a list of SecurityGroup in vpc to populate securityGroups.
-// +kubebuilder:validation:Optional
-SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// Reference to a PostgresqlUser in mdb to populate user.
+	// +kubebuilder:validation:Optional
+	UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"`
-// Reference to a PostgresqlUser in mdb to populate user.
-// +kubebuilder:validation:Optional
-UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"`
-
-// Selector for a PostgresqlUser in mdb to populate user.
-// +kubebuilder:validation:Optional
-UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"`
+	// Selector for a PostgresqlUser in mdb to populate user.
+	// +kubebuilder:validation:Optional
+	UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"`
 }
-
 type PostgresTargetObservation struct {
+	// (String)
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Connection []PostgresTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Connection []PostgresTargetConnectionObservation `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []PostgresTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []PostgresTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type PostgresTargetParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Connection []PostgresTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Connection []PostgresTargetConnectionParameters `json:"connection,omitempty" tf:"connection,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlDatabase
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// Reference to a PostgresqlDatabase in mdb to populate database.
+	// +kubebuilder:validation:Optional
+	DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"`
-// Reference to a PostgresqlDatabase in mdb to populate database.
-// +kubebuilder:validation:Optional
-DatabaseRef *v1.Reference `json:"databaseRef,omitempty" tf:"-"`
+	// Selector for a PostgresqlDatabase in mdb to populate database.
+	// +kubebuilder:validation:Optional
+	DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"`
-// Selector for a PostgresqlDatabase in mdb to populate database.
-// +kubebuilder:validation:Optional
-DatabaseSelector *v1.Selector `json:"databaseSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Password []PostgresTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Password []PostgresTargetPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
+	// (List of String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// References to SecurityGroup in vpc to populate securityGroups.
+	// +kubebuilder:validation:Optional
+	SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"`
-// References to SecurityGroup in vpc to populate securityGroups.
-// +kubebuilder:validation:Optional
-SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"`
+	// Selector for a list of SecurityGroup in vpc to populate securityGroups.
+	// +kubebuilder:validation:Optional
+	SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"`
-// Selector for a list of SecurityGroup in vpc to populate securityGroups.
-// +kubebuilder:validation:Optional
-SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
+	// +kubebuilder:validation:Optional
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractSpecName()
-// +kubebuilder:validation:Optional
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// Reference to a PostgresqlUser in mdb to populate user.
+	// +kubebuilder:validation:Optional
+	UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"`
-// Reference to a PostgresqlUser in mdb to populate user.
-// +kubebuilder:validation:Optional
-UserRef *v1.Reference `json:"userRef,omitempty" tf:"-"`
-
-// Selector for a PostgresqlUser in mdb to populate user.
-// +kubebuilder:validation:Optional
-UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"`
+	// Selector for a PostgresqlUser in mdb to populate user.
+	// +kubebuilder:validation:Optional
+	UserSelector *v1.Selector `json:"userSelector,omitempty" tf:"-"`
 }
-
 type PostgresTargetPasswordInitParameters struct {
-
-// (String, Sensitive)
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type PostgresTargetPasswordObservation struct {
-
 }
-
 type PostgresTargetPasswordParameters struct {
-
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type RoundRobinInitParameters struct {
-
 }
-
 type RoundRobinObservation struct {
-
 }
-
 type RoundRobinParameters struct {
-
 }
-
 type SaslInitParameters struct {
+	// (String)
+	Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"`
-// (String)
-Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []SaslPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []SaslPasswordInitParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type SaslObservation struct {
+	// (String)
+	Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"`
-// (String)
-Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Password []SaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Password []SaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type SaslParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Mechanism *string `json:"mechanism,omitempty" tf:"mechanism,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Password []SaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Password []SaslPasswordParameters `json:"password,omitempty" tf:"password,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-User *string `json:"user,omitempty" tf:"user,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	User *string `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 type SaslPasswordInitParameters struct {
-
-// (String, Sensitive)
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type SaslPasswordObservation struct {
-
 }
-
 type SaslPasswordParameters struct {
-
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type SerializerAutoInitParameters struct {
-
 }
-
 type SerializerAutoObservation struct {
-
 }
-
 type SerializerAutoParameters struct {
-
 }
-
 type SerializerDebeziumInitParameters struct {
-
-// (Block List) (see below for nested schema)
-SerializerParameters []SerializerParametersInitParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"`
+	// (Block List) (see below for nested schema)
+	SerializerParameters []SerializerParametersInitParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"`
 }
-
 type SerializerDebeziumObservation struct {
-
-// (Block List) (see below for nested schema)
-SerializerParameters []SerializerParametersObservation `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"`
+	// (Block List) (see below for nested schema)
+	SerializerParameters []SerializerParametersObservation `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"`
 }
-
 type SerializerDebeziumParameters struct {
-
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-SerializerParameters []SerializerParametersParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	SerializerParameters []SerializerParametersParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"`
 }
-
 type SerializerDebeziumSerializerParametersInitParameters struct {
+	// (Boolean)
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
-// (Boolean)
-Key *string `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Value *string `json:"value,omitempty" tf:"value,omitempty"`
+	// (String)
+	Value *string `json:"value,omitempty" tf:"value,omitempty"`
 }
-
 type SerializerDebeziumSerializerParametersObservation struct {
+	// (Boolean)
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
-// (Boolean)
-Key *string `json:"key,omitempty" tf:"key,omitempty"`
-
-// (String)
-Value *string `json:"value,omitempty" tf:"value,omitempty"`
+	// (String)
+	Value *string `json:"value,omitempty" tf:"value,omitempty"`
`json:"value,omitempty" tf:"value,omitempty"` } - type SerializerDebeziumSerializerParametersParameters struct { + // (Boolean) + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Value *string `json:"value,omitempty" tf:"value,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` } - type SerializerInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + SerializerAuto []SerializerAutoInitParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -SerializerAuto []SerializerAutoInitParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + SerializerDebezium []SerializerDebeziumInitParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -SerializerDebezium []SerializerDebeziumInitParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -SerializerJSON []SerializerJSONInitParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + SerializerJSON []SerializerJSONInitParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"` } - type SerializerJSONInitParameters struct { - } - type SerializerJSONObservation struct { - } - type SerializerJSONParameters struct { - } - type SerializerObservation struct { + // (Block List, Max: 1) (see below for nested schema) + SerializerAuto []SerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -SerializerAuto []SerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + SerializerDebezium []SerializerDebeziumObservation `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -SerializerDebezium []SerializerDebeziumObservation `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -SerializerJSON []SerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + SerializerJSON []SerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"` } - type SerializerParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + SerializerAuto []SerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -SerializerAuto []SerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + SerializerDebezium []SerializerDebeziumParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"` -// (Block List, Max: 1) (see below for nested 
schema) -// +kubebuilder:validation:Optional -SerializerDebezium []SerializerDebeziumParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -SerializerJSON []SerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + SerializerJSON []SerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"` } - type SerializerParametersInitParameters struct { + // (Boolean) + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// (Boolean) -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// (String) -Value *string `json:"value,omitempty" tf:"value,omitempty"` + // (String) + Value *string `json:"value,omitempty" tf:"value,omitempty"` } - type SerializerParametersObservation struct { + // (Boolean) + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// (Boolean) -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// (String) -Value *string `json:"value,omitempty" tf:"value,omitempty"` + // (String) + Value *string `json:"value,omitempty" tf:"value,omitempty"` } - type SerializerParametersParameters struct { + // (Boolean) + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Value *string `json:"value,omitempty" tf:"value,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` } - type SerializerSerializerAutoInitParameters struct { - } - type SerializerSerializerAutoObservation struct { - } - type SerializerSerializerAutoParameters struct { - } - type SerializerSerializerDebeziumInitParameters struct { - -// (Block List) (see below for nested schema) -SerializerParameters []SerializerDebeziumSerializerParametersInitParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"` + // (Block List) (see below for nested schema) + SerializerParameters []SerializerDebeziumSerializerParametersInitParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"` } - type SerializerSerializerDebeziumObservation struct { - -// (Block List) (see below for nested schema) -SerializerParameters []SerializerDebeziumSerializerParametersObservation `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"` + // (Block List) (see below for nested schema) + SerializerParameters []SerializerDebeziumSerializerParametersObservation `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"` } - type SerializerSerializerDebeziumParameters struct { - -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -SerializerParameters []SerializerDebeziumSerializerParametersParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"` + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + SerializerParameters []SerializerDebeziumSerializerParametersParameters `json:"serializerParameters,omitempty" tf:"serializer_parameters,omitempty"` } - type SerializerSerializerJSONInitParameters struct { - } - type SerializerSerializerJSONObservation struct { - } - type SerializerSerializerJSONParameters struct { - } - 
type SettingsInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + ClickhouseSource []ClickhouseSourceInitParameters `json:"clickhouseSource,omitempty" tf:"clickhouse_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ClickhouseSource []ClickhouseSourceInitParameters `json:"clickhouseSource,omitempty" tf:"clickhouse_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ClickhouseTarget []ClickhouseTargetInitParameters `json:"clickhouseTarget,omitempty" tf:"clickhouse_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ClickhouseTarget []ClickhouseTargetInitParameters `json:"clickhouseTarget,omitempty" tf:"clickhouse_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + KafkaSource []KafkaSourceInitParameters `json:"kafkaSource,omitempty" tf:"kafka_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -KafkaSource []KafkaSourceInitParameters `json:"kafkaSource,omitempty" tf:"kafka_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + KafkaTarget []KafkaTargetInitParameters `json:"kafkaTarget,omitempty" tf:"kafka_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -KafkaTarget []KafkaTargetInitParameters `json:"kafkaTarget,omitempty" tf:"kafka_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MetrikaSource []MetrikaSourceInitParameters `json:"metrikaSource,omitempty" tf:"metrika_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MetrikaSource []MetrikaSourceInitParameters `json:"metrikaSource,omitempty" tf:"metrika_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MongoSource []MongoSourceInitParameters `json:"mongoSource,omitempty" tf:"mongo_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MongoSource []MongoSourceInitParameters `json:"mongoSource,omitempty" tf:"mongo_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MongoTarget []MongoTargetInitParameters `json:"mongoTarget,omitempty" tf:"mongo_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MongoTarget []MongoTargetInitParameters `json:"mongoTarget,omitempty" tf:"mongo_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MySQLSource []MySQLSourceInitParameters `json:"mysqlSource,omitempty" tf:"mysql_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MySQLSource []MySQLSourceInitParameters `json:"mysqlSource,omitempty" tf:"mysql_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MySQLTarget []MySQLTargetInitParameters `json:"mysqlTarget,omitempty" tf:"mysql_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MySQLTarget []MySQLTargetInitParameters `json:"mysqlTarget,omitempty" tf:"mysql_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + PostgresSource []PostgresSourceInitParameters `json:"postgresSource,omitempty" tf:"postgres_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -PostgresSource []PostgresSourceInitParameters `json:"postgresSource,omitempty" tf:"postgres_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + PostgresTarget []PostgresTargetInitParameters `json:"postgresTarget,omitempty" tf:"postgres_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -PostgresTarget []PostgresTargetInitParameters 
`json:"postgresTarget,omitempty" tf:"postgres_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdbSource []YdbSourceInitParameters `json:"ydbSource,omitempty" tf:"ydb_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -YdbSource []YdbSourceInitParameters `json:"ydbSource,omitempty" tf:"ydb_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdbTarget []YdbTargetInitParameters `json:"ydbTarget,omitempty" tf:"ydb_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -YdbTarget []YdbTargetInitParameters `json:"ydbTarget,omitempty" tf:"ydb_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdsSource []YdsSourceInitParameters `json:"ydsSource,omitempty" tf:"yds_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -YdsSource []YdsSourceInitParameters `json:"ydsSource,omitempty" tf:"yds_source,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -YdsTarget []YdsTargetInitParameters `json:"ydsTarget,omitempty" tf:"yds_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdsTarget []YdsTargetInitParameters `json:"ydsTarget,omitempty" tf:"yds_target,omitempty"` } - type SettingsObservation struct { + // (Block List, Max: 1) (see below for nested schema) + ClickhouseSource []ClickhouseSourceObservation `json:"clickhouseSource,omitempty" tf:"clickhouse_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ClickhouseSource []ClickhouseSourceObservation `json:"clickhouseSource,omitempty" tf:"clickhouse_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + ClickhouseTarget []ClickhouseTargetObservation `json:"clickhouseTarget,omitempty" tf:"clickhouse_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ClickhouseTarget []ClickhouseTargetObservation `json:"clickhouseTarget,omitempty" tf:"clickhouse_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + KafkaSource []KafkaSourceObservation `json:"kafkaSource,omitempty" tf:"kafka_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -KafkaSource []KafkaSourceObservation `json:"kafkaSource,omitempty" tf:"kafka_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + KafkaTarget []KafkaTargetObservation `json:"kafkaTarget,omitempty" tf:"kafka_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -KafkaTarget []KafkaTargetObservation `json:"kafkaTarget,omitempty" tf:"kafka_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MetrikaSource []MetrikaSourceObservation `json:"metrikaSource,omitempty" tf:"metrika_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MetrikaSource []MetrikaSourceObservation `json:"metrikaSource,omitempty" tf:"metrika_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MongoSource []MongoSourceObservation `json:"mongoSource,omitempty" tf:"mongo_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MongoSource []MongoSourceObservation `json:"mongoSource,omitempty" tf:"mongo_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MongoTarget []MongoTargetObservation `json:"mongoTarget,omitempty" tf:"mongo_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MongoTarget []MongoTargetObservation `json:"mongoTarget,omitempty" tf:"mongo_target,omitempty"` + // (Block List, Max: 1) 
(see below for nested schema) + MySQLSource []MySQLSourceObservation `json:"mysqlSource,omitempty" tf:"mysql_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MySQLSource []MySQLSourceObservation `json:"mysqlSource,omitempty" tf:"mysql_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + MySQLTarget []MySQLTargetObservation `json:"mysqlTarget,omitempty" tf:"mysql_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -MySQLTarget []MySQLTargetObservation `json:"mysqlTarget,omitempty" tf:"mysql_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + PostgresSource []PostgresSourceObservation `json:"postgresSource,omitempty" tf:"postgres_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -PostgresSource []PostgresSourceObservation `json:"postgresSource,omitempty" tf:"postgres_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + PostgresTarget []PostgresTargetObservation `json:"postgresTarget,omitempty" tf:"postgres_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -PostgresTarget []PostgresTargetObservation `json:"postgresTarget,omitempty" tf:"postgres_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdbSource []YdbSourceObservation `json:"ydbSource,omitempty" tf:"ydb_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -YdbSource []YdbSourceObservation `json:"ydbSource,omitempty" tf:"ydb_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdbTarget []YdbTargetObservation `json:"ydbTarget,omitempty" tf:"ydb_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -YdbTarget []YdbTargetObservation `json:"ydbTarget,omitempty" tf:"ydb_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdsSource []YdsSourceObservation `json:"ydsSource,omitempty" tf:"yds_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -YdsSource []YdsSourceObservation `json:"ydsSource,omitempty" tf:"yds_source,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -YdsTarget []YdsTargetObservation `json:"ydsTarget,omitempty" tf:"yds_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + YdsTarget []YdsTargetObservation `json:"ydsTarget,omitempty" tf:"yds_target,omitempty"` } - type SettingsParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + ClickhouseSource []ClickhouseSourceParameters `json:"clickhouseSource,omitempty" tf:"clickhouse_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -ClickhouseSource []ClickhouseSourceParameters `json:"clickhouseSource,omitempty" tf:"clickhouse_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + ClickhouseTarget []ClickhouseTargetParameters `json:"clickhouseTarget,omitempty" tf:"clickhouse_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -ClickhouseTarget []ClickhouseTargetParameters `json:"clickhouseTarget,omitempty" tf:"clickhouse_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + KafkaSource []KafkaSourceParameters `json:"kafkaSource,omitempty" tf:"kafka_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// 
+kubebuilder:validation:Optional -KafkaSource []KafkaSourceParameters `json:"kafkaSource,omitempty" tf:"kafka_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + KafkaTarget []KafkaTargetParameters `json:"kafkaTarget,omitempty" tf:"kafka_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -KafkaTarget []KafkaTargetParameters `json:"kafkaTarget,omitempty" tf:"kafka_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + MetrikaSource []MetrikaSourceParameters `json:"metrikaSource,omitempty" tf:"metrika_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -MetrikaSource []MetrikaSourceParameters `json:"metrikaSource,omitempty" tf:"metrika_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + MongoSource []MongoSourceParameters `json:"mongoSource,omitempty" tf:"mongo_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -MongoSource []MongoSourceParameters `json:"mongoSource,omitempty" tf:"mongo_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + MongoTarget []MongoTargetParameters `json:"mongoTarget,omitempty" tf:"mongo_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -MongoTarget []MongoTargetParameters `json:"mongoTarget,omitempty" tf:"mongo_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + MySQLSource []MySQLSourceParameters `json:"mysqlSource,omitempty" tf:"mysql_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -MySQLSource []MySQLSourceParameters `json:"mysqlSource,omitempty" tf:"mysql_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + MySQLTarget []MySQLTargetParameters `json:"mysqlTarget,omitempty" tf:"mysql_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -MySQLTarget []MySQLTargetParameters `json:"mysqlTarget,omitempty" tf:"mysql_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + PostgresSource []PostgresSourceParameters `json:"postgresSource,omitempty" tf:"postgres_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -PostgresSource []PostgresSourceParameters `json:"postgresSource,omitempty" tf:"postgres_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + PostgresTarget []PostgresTargetParameters `json:"postgresTarget,omitempty" tf:"postgres_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -PostgresTarget []PostgresTargetParameters `json:"postgresTarget,omitempty" tf:"postgres_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + YdbSource []YdbSourceParameters `json:"ydbSource,omitempty" tf:"ydb_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -YdbSource []YdbSourceParameters 
`json:"ydbSource,omitempty" tf:"ydb_source,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + YdbTarget []YdbTargetParameters `json:"ydbTarget,omitempty" tf:"ydb_target,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -YdbTarget []YdbTargetParameters `json:"ydbTarget,omitempty" tf:"ydb_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + YdsSource []YdsSourceParameters `json:"ydsSource,omitempty" tf:"yds_source,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -YdsSource []YdsSourceParameters `json:"ydsSource,omitempty" tf:"yds_source,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -YdsTarget []YdsTargetParameters `json:"ydsTarget,omitempty" tf:"yds_target,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + YdsTarget []YdsTargetParameters `json:"ydsTarget,omitempty" tf:"yds_target,omitempty"` } - type ShardingInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + ColumnValueHash []ColumnValueHashInitParameters `json:"columnValueHash,omitempty" tf:"column_value_hash,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ColumnValueHash []ColumnValueHashInitParameters `json:"columnValueHash,omitempty" tf:"column_value_hash,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + CustomMapping []CustomMappingInitParameters `json:"customMapping,omitempty" tf:"custom_mapping,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -CustomMapping []CustomMappingInitParameters `json:"customMapping,omitempty" tf:"custom_mapping,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + RoundRobin []RoundRobinInitParameters `json:"roundRobin,omitempty" tf:"round_robin,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -RoundRobin []RoundRobinInitParameters `json:"roundRobin,omitempty" tf:"round_robin,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TransferID []TransferIDInitParameters `json:"transferId,omitempty" tf:"transfer_id,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TransferID []TransferIDInitParameters `json:"transferId,omitempty" tf:"transfer_id,omitempty"` } - type ShardingObservation struct { + // (Block List, Max: 1) (see below for nested schema) + ColumnValueHash []ColumnValueHashObservation `json:"columnValueHash,omitempty" tf:"column_value_hash,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -ColumnValueHash []ColumnValueHashObservation `json:"columnValueHash,omitempty" tf:"column_value_hash,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + CustomMapping []CustomMappingObservation `json:"customMapping,omitempty" tf:"custom_mapping,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -CustomMapping []CustomMappingObservation `json:"customMapping,omitempty" tf:"custom_mapping,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + RoundRobin []RoundRobinParameters `json:"roundRobin,omitempty" tf:"round_robin,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -RoundRobin []RoundRobinParameters `json:"roundRobin,omitempty" tf:"round_robin,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -TransferID 
[]TransferIDParameters `json:"transferId,omitempty" tf:"transfer_id,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + TransferID []TransferIDParameters `json:"transferId,omitempty" tf:"transfer_id,omitempty"` } - type ShardingParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + ColumnValueHash []ColumnValueHashParameters `json:"columnValueHash,omitempty" tf:"column_value_hash,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -ColumnValueHash []ColumnValueHashParameters `json:"columnValueHash,omitempty" tf:"column_value_hash,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + CustomMapping []CustomMappingParameters `json:"customMapping,omitempty" tf:"custom_mapping,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -CustomMapping []CustomMappingParameters `json:"customMapping,omitempty" tf:"custom_mapping,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + RoundRobin []RoundRobinParameters `json:"roundRobin,omitempty" tf:"round_robin,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -RoundRobin []RoundRobinParameters `json:"roundRobin,omitempty" tf:"round_robin,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -TransferID []TransferIDParameters `json:"transferId,omitempty" tf:"transfer_id,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + TransferID []TransferIDParameters `json:"transferId,omitempty" tf:"transfer_id,omitempty"` } - type ShardsInitParameters struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` - -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type ShardsObservation struct { + // (List of String) + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` - -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type ShardsParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type StreamsInitParameters struct { + // (List of String) + Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` -// (List of String) -Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` - -// (String) -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // (String) + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type StreamsObservation struct { + // (List of String) + Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` -// (List of String) -Columns 
[]*string `json:"columns,omitempty" tf:"columns,omitempty"` - -// (String) -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // (String) + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type StreamsParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type TLSModeDisabledInitParameters struct { - } - type TLSModeDisabledObservation struct { - } - type TLSModeDisabledParameters struct { - } - type TLSModeEnabledInitParameters struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type TLSModeEnabledObservation struct { - -// (String) -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type TLSModeEnabledParameters struct { - -// (String) -// +kubebuilder:validation:Optional -CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CACertificate *string `json:"caCertificate,omitempty" tf:"ca_certificate,omitempty"` } - type TLSModeInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []DisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []DisabledInitParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []EnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []EnabledInitParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type TLSModeObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Disabled []DisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Disabled []DisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Enabled []EnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Enabled []EnabledObservation `json:"enabled,omitempty" tf:"enabled,omitempty"` } - type TLSModeParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Disabled []DisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Disabled []DisabledParameters `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Enabled []EnabledParameters `json:"enabled,omitempty" tf:"enabled,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Enabled 
 }
-
 type TokenInitParameters struct {
-
-// (String, Sensitive)
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type TokenObservation struct {
-
 }
-
 type TokenParameters struct {
-
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	RawSecretRef *v1.SecretKeySelector `json:"rawSecretRef,omitempty" tf:"-"`
 }
-
 type TopicInitParameters struct {
+	// (Boolean)
+	SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-// (Boolean)
-SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-
-// (String)
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// (String)
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
 }
-
 type TopicObservation struct {
+	// (Boolean)
+	SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-// (Boolean)
-SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-
-// (String)
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// (String)
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
 }
-
 type TopicParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
 }
-
 type TopicSettingsInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Topic []TopicInitParameters `json:"topic,omitempty" tf:"topic,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Topic []TopicInitParameters `json:"topic,omitempty" tf:"topic,omitempty"`
-
-// (String)
-TopicPrefix *string `json:"topicPrefix,omitempty" tf:"topic_prefix,omitempty"`
+	// (String)
+	TopicPrefix *string `json:"topicPrefix,omitempty" tf:"topic_prefix,omitempty"`
 }
-
 type TopicSettingsObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Topic []TopicObservation `json:"topic,omitempty" tf:"topic,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Topic []TopicObservation `json:"topic,omitempty" tf:"topic,omitempty"`
-
-// (String)
-TopicPrefix *string `json:"topicPrefix,omitempty" tf:"topic_prefix,omitempty"`
+	// (String)
+	TopicPrefix *string `json:"topicPrefix,omitempty" tf:"topic_prefix,omitempty"`
 }
-
 type TopicSettingsParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Topic []TopicParameters `json:"topic,omitempty" tf:"topic,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Topic []TopicParameters `json:"topic,omitempty" tf:"topic,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-TopicPrefix *string `json:"topicPrefix,omitempty" tf:"topic_prefix,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	TopicPrefix *string `json:"topicPrefix,omitempty" tf:"topic_prefix,omitempty"`
 }
-
 type TransferIDInitParameters struct {
-
 }
-
 type TransferIDObservation struct {
-
 }
-
 type TransferIDParameters struct {
-
 }
-
 type TransformerInitParameters struct {
+	// (String)
+	BufferFlushInterval *string `json:"bufferFlushInterval,omitempty" tf:"buffer_flush_interval,omitempty"`
-// (String)
-BufferFlushInterval *string `json:"bufferFlushInterval,omitempty" tf:"buffer_flush_interval,omitempty"`
+	// (String)
+	BufferSize *string `json:"bufferSize,omitempty" tf:"buffer_size,omitempty"`
-// (String)
-BufferSize *string `json:"bufferSize,omitempty" tf:"buffer_size,omitempty"`
+	// (String)
+	CloudFunction *string `json:"cloudFunction,omitempty" tf:"cloud_function,omitempty"`
-// (String)
-CloudFunction *string `json:"cloudFunction,omitempty" tf:"cloud_function,omitempty"`
+	// (String)
+	InvocationTimeout *string `json:"invocationTimeout,omitempty" tf:"invocation_timeout,omitempty"`
-// (String)
-InvocationTimeout *string `json:"invocationTimeout,omitempty" tf:"invocation_timeout,omitempty"`
+	// (Number)
+	NumberOfRetries *float64 `json:"numberOfRetries,omitempty" tf:"number_of_retries,omitempty"`
-// (Number)
-NumberOfRetries *float64 `json:"numberOfRetries,omitempty" tf:"number_of_retries,omitempty"`
-
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 }
-
 type TransformerObservation struct {
+	// (String)
+	BufferFlushInterval *string `json:"bufferFlushInterval,omitempty" tf:"buffer_flush_interval,omitempty"`
-// (String)
-BufferFlushInterval *string `json:"bufferFlushInterval,omitempty" tf:"buffer_flush_interval,omitempty"`
+	// (String)
+	BufferSize *string `json:"bufferSize,omitempty" tf:"buffer_size,omitempty"`
-// (String)
-BufferSize *string `json:"bufferSize,omitempty" tf:"buffer_size,omitempty"`
+	// (String)
+	CloudFunction *string `json:"cloudFunction,omitempty" tf:"cloud_function,omitempty"`
-// (String)
-CloudFunction *string `json:"cloudFunction,omitempty" tf:"cloud_function,omitempty"`
+	// (String)
+	InvocationTimeout *string `json:"invocationTimeout,omitempty" tf:"invocation_timeout,omitempty"`
-// (String)
-InvocationTimeout *string `json:"invocationTimeout,omitempty" tf:"invocation_timeout,omitempty"`
+	// (Number)
+	NumberOfRetries *float64 `json:"numberOfRetries,omitempty" tf:"number_of_retries,omitempty"`
-// (Number)
-NumberOfRetries *float64 `json:"numberOfRetries,omitempty" tf:"number_of_retries,omitempty"`
-
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 }
-
 type TransformerParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	BufferFlushInterval *string `json:"bufferFlushInterval,omitempty" tf:"buffer_flush_interval,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-BufferFlushInterval *string `json:"bufferFlushInterval,omitempty" tf:"buffer_flush_interval,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	BufferSize *string `json:"bufferSize,omitempty" tf:"buffer_size,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-BufferSize *string `json:"bufferSize,omitempty" tf:"buffer_size,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	CloudFunction *string `json:"cloudFunction,omitempty" tf:"cloud_function,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-CloudFunction *string `json:"cloudFunction,omitempty" tf:"cloud_function,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	InvocationTimeout *string `json:"invocationTimeout,omitempty" tf:"invocation_timeout,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-InvocationTimeout *string `json:"invocationTimeout,omitempty" tf:"invocation_timeout,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	NumberOfRetries *float64 `json:"numberOfRetries,omitempty" tf:"number_of_retries,omitempty"`
-// (Number)
-// +kubebuilder:validation:Optional
-NumberOfRetries *float64 `json:"numberOfRetries,omitempty" tf:"number_of_retries,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 }
-
 type TskvParserDataSchemaFieldsFieldsInitParameters struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-
-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 type TskvParserDataSchemaFieldsFieldsObservation struct {
+	// (Boolean)
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-// (Boolean)
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (Boolean)
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-// (Boolean)
-Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-
-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 type TskvParserDataSchemaFieldsFieldsParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Key *bool `json:"key,omitempty" tf:"key,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-Key *bool `json:"key,omitempty" tf:"key,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`
-// (Boolean)
-//
+kubebuilder:validation:Optional -Required *bool `json:"required,omitempty" tf:"required,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type TskvParserDataSchemaFieldsInitParameters struct { - -// (Block List, Max: 1) (see below for nested schema) -Fields []TskvParserDataSchemaFieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Fields []TskvParserDataSchemaFieldsFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"` } - type TskvParserDataSchemaFieldsObservation struct { - -// (Block List, Max: 1) (see below for nested schema) -Fields []TskvParserDataSchemaFieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Fields []TskvParserDataSchemaFieldsFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"` } - type TskvParserDataSchemaFieldsParameters struct { - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Fields []TskvParserDataSchemaFieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Fields []TskvParserDataSchemaFieldsFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"` } - type TskvParserDataSchemaInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Fields []DataSchemaFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Fields []DataSchemaFieldsInitParameters `json:"fields,omitempty" tf:"fields,omitempty"` - -// (String) -JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` + // (String) + JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` } - type TskvParserDataSchemaObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Fields []DataSchemaFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Fields []DataSchemaFieldsObservation `json:"fields,omitempty" tf:"fields,omitempty"` - -// (String) -JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` + // (String) + JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` } - type TskvParserDataSchemaParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Fields []DataSchemaFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Fields []DataSchemaFieldsParameters `json:"fields,omitempty" tf:"fields,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` + // (String) + // +kubebuilder:validation:Optional + JSONFields *string `json:"jsonFields,omitempty" tf:"json_fields,omitempty"` } - type TskvParserInitParameters struct { + // (Boolean) + AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` -// (Boolean) -AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + DataSchema 
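Editor's note: the `(Block List, Max: 1)` doc comments above come straight from the Terraform schema; upjet renders such a block as a Go slice that is expected to hold at most one element. A minimal sketch of populating the nested `fields` block follows; the module import path and the `ptrTo` helper are illustrative assumptions, not part of the generated code.

package main

import (
	"fmt"

	// Hypothetical module path; substitute the provider's real one.
	dt "github.com/yandex-cloud/provider-yc/apis/datatransfer/v1alpha1"
)

// ptrTo builds the pointer values the generated optional fields expect.
func ptrTo[T any](v T) *T { return &v }

func main() {
	// A "Max: 1" block list: a slice carrying a single nested element.
	block := dt.TskvParserDataSchemaFieldsParameters{
		Fields: []dt.TskvParserDataSchemaFieldsFieldsParameters{{
			Name:     ptrTo("level"),
			Type:     ptrTo("string"),
			Required: ptrTo(true),
		}},
	}
	fmt.Println(*block.Fields[0].Name) // level
}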
-
 type TskvParserInitParameters struct {
+	// (Boolean)
+	AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
-// (Boolean)
-AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	DataSchema []TskvParserDataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-DataSchema []TskvParserDataSchemaInitParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
+	// (Boolean)
+	NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
-// (Boolean)
-NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
-
-// (Boolean)
-UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
+	// (Boolean)
+	UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
 }
-
 type TskvParserObservation struct {
+	// (Boolean)
+	AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
-// (Boolean)
-AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	DataSchema []TskvParserDataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-DataSchema []TskvParserDataSchemaObservation `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
+	// (Boolean)
+	NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
-// (Boolean)
-NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
-
-// (Boolean)
-UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
+	// (Boolean)
+	UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
 }
-
 type TskvParserParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-AddRestColumn *bool `json:"addRestColumn,omitempty" tf:"add_rest_column,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	DataSchema []TskvParserDataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-DataSchema []TskvParserDataSchemaParameters `json:"dataSchema,omitempty" tf:"data_schema,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-NullKeysAllowed *bool `json:"nullKeysAllowed,omitempty" tf:"null_keys_allowed,omitempty"`
-
-// (Boolean)
-// +kubebuilder:validation:Optional
-UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	UnescapeStringValues *bool `json:"unescapeStringValues,omitempty" tf:"unescape_string_values,omitempty"`
 }
-
 type YdbSourceInitParameters struct {
+	// (String)
+	ChangefeedCustomName *string `json:"changefeedCustomName,omitempty" tf:"changefeed_custom_name,omitempty"`
-// (String)
-ChangefeedCustomName *string `json:"changefeedCustomName,omitempty" tf:"changefeed_custom_name,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
-// (String)
-Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
+	// (List of String)
+	Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
-// (List of String)
-Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
+	// (String, Sensitive)
+	SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
-// (String, Sensitive)
-SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdbSourceObservation struct {
+	// (String)
+	ChangefeedCustomName *string `json:"changefeedCustomName,omitempty" tf:"changefeed_custom_name,omitempty"`
-// (String)
-ChangefeedCustomName *string `json:"changefeedCustomName,omitempty" tf:"changefeed_custom_name,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
-// (String)
-Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
+	// (List of String)
+	Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
-// (List of String)
-Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdbSourceParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	ChangefeedCustomName *string `json:"changefeedCustomName,omitempty" tf:"changefeed_custom_name,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ChangefeedCustomName *string `json:"changefeedCustomName,omitempty" tf:"changefeed_custom_name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdbTargetInitParameters struct {
+	// (String)
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	DefaultCompression *string `json:"defaultCompression,omitempty" tf:"default_compression,omitempty"`
-// (String)
-DefaultCompression *string `json:"defaultCompression,omitempty" tf:"default_compression,omitempty"`
+	// (String)
+	Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
-// (String)
-Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
+	// (Boolean)
+	IsTableColumnOriented *bool `json:"isTableColumnOriented,omitempty" tf:"is_table_column_oriented,omitempty"`
-// (Boolean)
-IsTableColumnOriented *bool `json:"isTableColumnOriented,omitempty" tf:"is_table_column_oriented,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String, Sensitive)
+	SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
-// (String, Sensitive)
-SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdbTargetObservation struct {
+	// (String)
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	DefaultCompression *string `json:"defaultCompression,omitempty" tf:"default_compression,omitempty"`
-// (String)
-DefaultCompression *string `json:"defaultCompression,omitempty" tf:"default_compression,omitempty"`
+	// (String)
+	Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
-// (String)
-Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
+	// (Boolean)
+	IsTableColumnOriented *bool `json:"isTableColumnOriented,omitempty" tf:"is_table_column_oriented,omitempty"`
-// (Boolean)
-IsTableColumnOriented *bool `json:"isTableColumnOriented,omitempty" tf:"is_table_column_oriented,omitempty"`
+	// (String)
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
-// (String)
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
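Editor's note: a detail worth calling out in the YDB structs above — `SaKeyContentSecretRef` is tagged `tf:"-"` and appears in the InitParameters and Parameters variants but not in the Observation variant, so the service-account key is projected from a Kubernetes Secret and never echoed back into status. A sketch of building that selector with the crossplane-runtime common v1 types this file already imports; the secret name, namespace, and key are example values only:

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// Example values only: points at key "sa-key.json" in Secret
	// "ydb-sa-key" in namespace "crossplane-system".
	saKeyRef := &xpv1.SecretKeySelector{
		SecretReference: xpv1.SecretReference{
			Name:      "ydb-sa-key",
			Namespace: "crossplane-system",
		},
		Key: "sa-key.json",
	}
	fmt.Println(saKeyRef.Key)
}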
-
 type YdbTargetParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	DefaultCompression *string `json:"defaultCompression,omitempty" tf:"default_compression,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-DefaultCompression *string `json:"defaultCompression,omitempty" tf:"default_compression,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Instance *string `json:"instance,omitempty" tf:"instance,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	IsTableColumnOriented *bool `json:"isTableColumnOriented,omitempty" tf:"is_table_column_oriented,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-IsTableColumnOriented *bool `json:"isTableColumnOriented,omitempty" tf:"is_table_column_oriented,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// (String, Sensitive)
+	// +kubebuilder:validation:Optional
+	SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
-// (String, Sensitive)
-// +kubebuilder:validation:Optional
-SaKeyContentSecretRef *v1.SecretKeySelector `json:"saKeyContentSecretRef,omitempty" tf:"-"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdsSourceInitParameters struct {
+	// (Boolean)
+	AllowTTLRewind *bool `json:"allowTtlRewind,omitempty" tf:"allow_ttl_rewind,omitempty"`
-// (Boolean)
-AllowTTLRewind *bool `json:"allowTtlRewind,omitempty" tf:"allow_ttl_rewind,omitempty"`
+	// (String)
+	Consumer *string `json:"consumer,omitempty" tf:"consumer,omitempty"`
-// (String)
-Consumer *string `json:"consumer,omitempty" tf:"consumer,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
-// (String)
-Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Parser []YdsSourceParserInitParameters `json:"parser,omitempty" tf:"parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Parser []YdsSourceParserInitParameters `json:"parser,omitempty" tf:"parser,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-// (String)
-Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (List of String)
-SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
+	// (List of String)
+	SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
 }
-
 type YdsSourceObservation struct {
+	// (Boolean)
+	AllowTTLRewind *bool `json:"allowTtlRewind,omitempty" tf:"allow_ttl_rewind,omitempty"`
-// (Boolean)
-AllowTTLRewind *bool `json:"allowTtlRewind,omitempty" tf:"allow_ttl_rewind,omitempty"`
+	// (String)
+	Consumer *string `json:"consumer,omitempty" tf:"consumer,omitempty"`
-// (String)
-Consumer *string `json:"consumer,omitempty" tf:"consumer,omitempty"`
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
-// (String)
-Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Parser []YdsSourceParserObservation `json:"parser,omitempty" tf:"parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Parser []YdsSourceParserObservation `json:"parser,omitempty" tf:"parser,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-// (String)
-Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (List of String)
-SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
+	// (List of String)
+	SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
 }
-
 type YdsSourceParameters struct {
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	AllowTTLRewind *bool `json:"allowTtlRewind,omitempty" tf:"allow_ttl_rewind,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-AllowTTLRewind *bool `json:"allowTtlRewind,omitempty" tf:"allow_ttl_rewind,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Consumer *string `json:"consumer,omitempty" tf:"consumer,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Consumer *string `json:"consumer,omitempty" tf:"consumer,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Parser []YdsSourceParserParameters `json:"parser,omitempty" tf:"parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Parser []YdsSourceParserParameters `json:"parser,omitempty" tf:"parser,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
 }
-
 type YdsSourceParserInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	AuditTrailsV1Parser []ParserAuditTrailsV1ParserInitParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-AuditTrailsV1Parser []ParserAuditTrailsV1ParserInitParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	CloudLoggingParser []ParserCloudLoggingParserInitParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-CloudLoggingParser []ParserCloudLoggingParserInitParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	JSONParser []ParserJSONParserInitParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-JSONParser []ParserJSONParserInitParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TskvParser []ParserTskvParserInitParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TskvParser []ParserTskvParserInitParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"`
 }
-
 type YdsSourceParserObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	AuditTrailsV1Parser []ParserAuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-AuditTrailsV1Parser []ParserAuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	CloudLoggingParser []ParserCloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-CloudLoggingParser []ParserCloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	JSONParser []ParserJSONParserObservation `json:"jsonParser,omitempty" tf:"json_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-JSONParser []ParserJSONParserObservation `json:"jsonParser,omitempty" tf:"json_parser,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TskvParser []ParserTskvParserObservation `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TskvParser []ParserTskvParserObservation `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"`
 }
-
 type YdsSourceParserParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	AuditTrailsV1Parser []ParserAuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-AuditTrailsV1Parser []ParserAuditTrailsV1ParserParameters `json:"auditTrailsV1Parser,omitempty" tf:"audit_trails_v1_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	CloudLoggingParser []ParserCloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-CloudLoggingParser []ParserCloudLoggingParserParameters `json:"cloudLoggingParser,omitempty" tf:"cloud_logging_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	JSONParser []ParserJSONParserParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-JSONParser []ParserJSONParserParameters `json:"jsonParser,omitempty" tf:"json_parser,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TskvParser []ParserTskvParserParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TskvParser []ParserTskvParserParameters `json:"tskvParser,omitempty" tf:"tskv_parser,omitempty"`
 }
-
 type YdsTargetInitParameters struct {
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
-// (String)
-Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+	// (Boolean)
+	SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-// (Boolean)
-SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Serializer []YdsTargetSerializerInitParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Serializer []YdsTargetSerializerInitParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-// (String)
-Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdsTargetObservation struct {
+	// (String)
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
-// (String)
-Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+	// (Boolean)
+	SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-// (Boolean)
-SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
+	// (List of String)
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Serializer []YdsTargetSerializerObservation `json:"serializer,omitempty" tf:"serializer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Serializer []YdsTargetSerializerObservation `json:"serializer,omitempty" tf:"serializer,omitempty"`
+	// (String)
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-// (String)
-Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-
-// (String)
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdsTargetParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Database *string `json:"database,omitempty" tf:"database,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Database *string `json:"database,omitempty" tf:"database,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+	// (Boolean)
+	// +kubebuilder:validation:Optional
+	SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
-// (Boolean)
-// +kubebuilder:validation:Optional
-SaveTxOrder *bool `json:"saveTxOrder,omitempty" tf:"save_tx_order,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Serializer []YdsTargetSerializerParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Serializer []YdsTargetSerializerParameters `json:"serializer,omitempty" tf:"serializer,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Stream *string `json:"stream,omitempty" tf:"stream,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 }
-
 type YdsTargetSerializerInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	SerializerAuto []SerializerSerializerAutoInitParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-SerializerAuto []SerializerSerializerAutoInitParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	SerializerDebezium []SerializerSerializerDebeziumInitParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-SerializerDebezium []SerializerSerializerDebeziumInitParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-SerializerJSON []SerializerSerializerJSONInitParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	SerializerJSON []SerializerSerializerJSONInitParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"`
 }
-
 type YdsTargetSerializerObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	SerializerAuto []SerializerSerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-SerializerAuto []SerializerSerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	SerializerDebezium []SerializerSerializerDebeziumObservation `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-SerializerDebezium []SerializerSerializerDebeziumObservation `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-SerializerJSON []SerializerSerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	SerializerJSON []SerializerSerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"`
 }
-
 type YdsTargetSerializerParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	SerializerAuto []SerializerSerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-SerializerAuto []SerializerSerializerAutoParameters `json:"serializerAuto,omitempty" tf:"serializer_auto,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	SerializerDebezium []SerializerSerializerDebeziumParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-SerializerDebezium []SerializerSerializerDebeziumParameters `json:"serializerDebezium,omitempty" tf:"serializer_debezium,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-SerializerJSON []SerializerSerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	SerializerJSON []SerializerSerializerJSONParameters `json:"serializerJson,omitempty" tf:"serializer_json,omitempty"`
 }
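Editor's note: every schema element above is generated three ways — an InitParameters struct (surfaced as spec.initProvider), a Parameters struct carrying `+kubebuilder:validation:Optional` markers (spec.forProvider), and an Observation struct (status.atProvider), which the EndpointSpec and EndpointStatus below wire together. Because every field is a pointer with `omitempty`, a nil field means "not configured" rather than zero-valued. A hedged sketch of filling one of these structs; the module path and `ptrTo` helper are illustrative assumptions:

package main

import (
	"fmt"

	dt "github.com/yandex-cloud/provider-yc/apis/datatransfer/v1alpha1" // hypothetical path
)

func ptrTo[T any](v T) *T { return &v }

func main() {
	// Only the fields that are set survive serialization; nil pointers
	// drop out via omitempty, so the Terraform layer never sees them.
	target := dt.YdsTargetParameters{
		Database:    ptrTo("db1"),
		Stream:      ptrTo("events"),
		SaveTxOrder: ptrTo(true),
	}
	fmt.Println(*target.Stream) // events
}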
 // EndpointSpec defines the desired state of Endpoint
 type EndpointSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider     EndpointParameters `json:"forProvider"`
+	ForProvider EndpointParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -6347,21 +5514,20 @@ type EndpointSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider    EndpointInitParameters `json:"initProvider,omitempty"`
+	InitProvider EndpointInitParameters `json:"initProvider,omitempty"`
 }
 
 // EndpointStatus defines the observed state of Endpoint.
 type EndpointStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider        EndpointObservation `json:"atProvider,omitempty"`
+	AtProvider EndpointObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
-// Endpoint is the Schema for the Endpoints API.
+// Endpoint is the Schema for the Endpoints API.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
 // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
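Editor's note: the next file marks Endpoint and Transfer as conversion hubs in the controller-runtime sense — other API versions ("spokes") convert through them. No spoke versions exist in this PR; purely to illustrate the contract, a hypothetical future v1beta1 spoke would implement conversion.Convertible against the hub (the import path and field mapping are assumptions, not generated code):

// Hypothetical v1beta1 spoke, for illustration only.
package v1beta1

import (
	"sigs.k8s.io/controller-runtime/pkg/conversion"

	v1alpha1 "github.com/yandex-cloud/provider-yc/apis/datatransfer/v1alpha1" // hypothetical path
)

// ConvertTo converts this Endpoint into the v1alpha1 hub version.
func (e *Endpoint) ConvertTo(dst conversion.Hub) error {
	hub := dst.(*v1alpha1.Endpoint)
	hub.ObjectMeta = e.ObjectMeta
	// copy remaining spec/status fields here
	return nil
}

// ConvertFrom converts from the v1alpha1 hub version into this Endpoint.
func (e *Endpoint) ConvertFrom(src conversion.Hub) error {
	hub := src.(*v1alpha1.Endpoint)
	e.ObjectMeta = hub.ObjectMeta
	// copy remaining spec/status fields here
	return nil
}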
diff --git a/apis/datatransfer/v1alpha1/zz_generated.conversion_hubs.go b/apis/datatransfer/v1alpha1/zz_generated.conversion_hubs.go
index 6956a1f..974c0b6 100755
--- a/apis/datatransfer/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/datatransfer/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,13 +1,9 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
+// Hub marks this type as a conversion hub.
+func (tr *Endpoint) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *Endpoint) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *Transfer) Hub() {}
-
+// Hub marks this type as a conversion hub.
+func (tr *Transfer) Hub() {}
diff --git a/apis/datatransfer/v1alpha1/zz_generated.deepcopy.go b/apis/datatransfer/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..eff86e9
--- /dev/null
+++ b/apis/datatransfer/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,16967 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AltNamesInitParameters) DeepCopyInto(out *AltNamesInitParameters) {
+	*out = *in
+	if in.FromName != nil {
+		in, out := &in.FromName, &out.FromName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToName != nil {
+		in, out := &in.ToName, &out.ToName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AltNamesInitParameters.
+func (in *AltNamesInitParameters) DeepCopy() *AltNamesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AltNamesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AltNamesObservation) DeepCopyInto(out *AltNamesObservation) {
+	*out = *in
+	if in.FromName != nil {
+		in, out := &in.FromName, &out.FromName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToName != nil {
+		in, out := &in.ToName, &out.ToName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AltNamesObservation.
+func (in *AltNamesObservation) DeepCopy() *AltNamesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AltNamesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AltNamesParameters) DeepCopyInto(out *AltNamesParameters) {
+	*out = *in
+	if in.FromName != nil {
+		in, out := &in.FromName, &out.FromName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToName != nil {
+		in, out := &in.ToName, &out.ToName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AltNamesParameters.
+func (in *AltNamesParameters) DeepCopy() *AltNamesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AltNamesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditTrailsV1ParserInitParameters) DeepCopyInto(out *AuditTrailsV1ParserInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditTrailsV1ParserInitParameters.
+func (in *AuditTrailsV1ParserInitParameters) DeepCopy() *AuditTrailsV1ParserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuditTrailsV1ParserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditTrailsV1ParserObservation) DeepCopyInto(out *AuditTrailsV1ParserObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditTrailsV1ParserObservation.
+func (in *AuditTrailsV1ParserObservation) DeepCopy() *AuditTrailsV1ParserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AuditTrailsV1ParserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditTrailsV1ParserParameters) DeepCopyInto(out *AuditTrailsV1ParserParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditTrailsV1ParserParameters.
+func (in *AuditTrailsV1ParserParameters) DeepCopy() *AuditTrailsV1ParserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuditTrailsV1ParserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthInitParameters) DeepCopyInto(out *AuthInitParameters) {
+	*out = *in
+	if in.NoAuth != nil {
+		in, out := &in.NoAuth, &out.NoAuth
+		*out = make([]NoAuthInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Sasl != nil {
+		in, out := &in.Sasl, &out.Sasl
+		*out = make([]SaslInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInitParameters.
+func (in *AuthInitParameters) DeepCopy() *AuthInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthNoAuthInitParameters) DeepCopyInto(out *AuthNoAuthInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthNoAuthInitParameters.
+func (in *AuthNoAuthInitParameters) DeepCopy() *AuthNoAuthInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthNoAuthInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthNoAuthObservation) DeepCopyInto(out *AuthNoAuthObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthNoAuthObservation.
+func (in *AuthNoAuthObservation) DeepCopy() *AuthNoAuthObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthNoAuthObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthNoAuthParameters) DeepCopyInto(out *AuthNoAuthParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthNoAuthParameters.
+func (in *AuthNoAuthParameters) DeepCopy() *AuthNoAuthParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthNoAuthParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthObservation) DeepCopyInto(out *AuthObservation) {
+	*out = *in
+	if in.NoAuth != nil {
+		in, out := &in.NoAuth, &out.NoAuth
+		*out = make([]NoAuthParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Sasl != nil {
+		in, out := &in.Sasl, &out.Sasl
+		*out = make([]SaslObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthObservation.
+func (in *AuthObservation) DeepCopy() *AuthObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthParameters) DeepCopyInto(out *AuthParameters) {
+	*out = *in
+	if in.NoAuth != nil {
+		in, out := &in.NoAuth, &out.NoAuth
+		*out = make([]NoAuthParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Sasl != nil {
+		in, out := &in.Sasl, &out.Sasl
+		*out = make([]SaslParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthParameters.
+func (in *AuthParameters) DeepCopy() *AuthParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSaslInitParameters) DeepCopyInto(out *AuthSaslInitParameters) {
+	*out = *in
+	if in.Mechanism != nil {
+		in, out := &in.Mechanism, &out.Mechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]AuthSaslPasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSaslInitParameters.
+func (in *AuthSaslInitParameters) DeepCopy() *AuthSaslInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthSaslInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSaslObservation) DeepCopyInto(out *AuthSaslObservation) {
+	*out = *in
+	if in.Mechanism != nil {
+		in, out := &in.Mechanism, &out.Mechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]AuthSaslPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSaslObservation.
+func (in *AuthSaslObservation) DeepCopy() *AuthSaslObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthSaslObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSaslParameters) DeepCopyInto(out *AuthSaslParameters) {
+	*out = *in
+	if in.Mechanism != nil {
+		in, out := &in.Mechanism, &out.Mechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]AuthSaslPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSaslParameters.
+func (in *AuthSaslParameters) DeepCopy() *AuthSaslParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthSaslParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSaslPasswordInitParameters) DeepCopyInto(out *AuthSaslPasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSaslPasswordInitParameters.
+func (in *AuthSaslPasswordInitParameters) DeepCopy() *AuthSaslPasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthSaslPasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSaslPasswordObservation) DeepCopyInto(out *AuthSaslPasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSaslPasswordObservation.
+func (in *AuthSaslPasswordObservation) DeepCopy() *AuthSaslPasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthSaslPasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSaslPasswordParameters) DeepCopyInto(out *AuthSaslPasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSaslPasswordParameters.
+func (in *AuthSaslPasswordParameters) DeepCopy() *AuthSaslPasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthSaslPasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClickhouseSourceInitParameters) DeepCopyInto(out *ClickhouseSourceInitParameters) {
+	*out = *in
+	if in.ClickhouseClusterName != nil {
+		in, out := &in.ClickhouseClusterName, &out.ClickhouseClusterName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]ConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseSourceInitParameters.
+func (in *ClickhouseSourceInitParameters) DeepCopy() *ClickhouseSourceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ClickhouseSourceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClickhouseSourceObservation) DeepCopyInto(out *ClickhouseSourceObservation) {
+	*out = *in
+	if in.ClickhouseClusterName != nil {
+		in, out := &in.ClickhouseClusterName, &out.ClickhouseClusterName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]ConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseSourceObservation.
+func (in *ClickhouseSourceObservation) DeepCopy() *ClickhouseSourceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ClickhouseSourceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClickhouseSourceParameters) DeepCopyInto(out *ClickhouseSourceParameters) {
+	*out = *in
+	if in.ClickhouseClusterName != nil {
+		in, out := &in.ClickhouseClusterName, &out.ClickhouseClusterName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]ConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseSourceParameters.
+func (in *ClickhouseSourceParameters) DeepCopy() *ClickhouseSourceParameters { + if in == nil { + return nil + } + out := new(ClickhouseSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseTargetConnectionInitParameters) DeepCopyInto(out *ClickhouseTargetConnectionInitParameters) { + *out = *in + if in.ConnectionOptions != nil { + in, out := &in.ConnectionOptions, &out.ConnectionOptions + *out = make([]ConnectionConnectionOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseTargetConnectionInitParameters. +func (in *ClickhouseTargetConnectionInitParameters) DeepCopy() *ClickhouseTargetConnectionInitParameters { + if in == nil { + return nil + } + out := new(ClickhouseTargetConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseTargetConnectionObservation) DeepCopyInto(out *ClickhouseTargetConnectionObservation) { + *out = *in + if in.ConnectionOptions != nil { + in, out := &in.ConnectionOptions, &out.ConnectionOptions + *out = make([]ConnectionConnectionOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseTargetConnectionObservation. +func (in *ClickhouseTargetConnectionObservation) DeepCopy() *ClickhouseTargetConnectionObservation { + if in == nil { + return nil + } + out := new(ClickhouseTargetConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseTargetConnectionParameters) DeepCopyInto(out *ClickhouseTargetConnectionParameters) { + *out = *in + if in.ConnectionOptions != nil { + in, out := &in.ConnectionOptions, &out.ConnectionOptions + *out = make([]ConnectionConnectionOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseTargetConnectionParameters. +func (in *ClickhouseTargetConnectionParameters) DeepCopy() *ClickhouseTargetConnectionParameters { + if in == nil { + return nil + } + out := new(ClickhouseTargetConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClickhouseTargetInitParameters) DeepCopyInto(out *ClickhouseTargetInitParameters) { + *out = *in + if in.AltNames != nil { + in, out := &in.AltNames, &out.AltNames + *out = make([]AltNamesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.ClickhouseClusterName != nil { + in, out := &in.ClickhouseClusterName, &out.ClickhouseClusterName + *out = new(string) + **out = **in + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]ClickhouseTargetConnectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Sharding != nil { + in, out := &in.Sharding, &out.Sharding + *out = make([]ShardingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseTargetInitParameters. +func (in *ClickhouseTargetInitParameters) DeepCopy() *ClickhouseTargetInitParameters { + if in == nil { + return nil + } + out := new(ClickhouseTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseTargetObservation) DeepCopyInto(out *ClickhouseTargetObservation) { + *out = *in + if in.AltNames != nil { + in, out := &in.AltNames, &out.AltNames + *out = make([]AltNamesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.ClickhouseClusterName != nil { + in, out := &in.ClickhouseClusterName, &out.ClickhouseClusterName + *out = new(string) + **out = **in + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]ClickhouseTargetConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Sharding != nil { + in, out := &in.Sharding, &out.Sharding + *out = make([]ShardingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseTargetObservation. 
+func (in *ClickhouseTargetObservation) DeepCopy() *ClickhouseTargetObservation { + if in == nil { + return nil + } + out := new(ClickhouseTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseTargetParameters) DeepCopyInto(out *ClickhouseTargetParameters) { + *out = *in + if in.AltNames != nil { + in, out := &in.AltNames, &out.AltNames + *out = make([]AltNamesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.ClickhouseClusterName != nil { + in, out := &in.ClickhouseClusterName, &out.ClickhouseClusterName + *out = new(string) + **out = **in + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]ClickhouseTargetConnectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Sharding != nil { + in, out := &in.Sharding, &out.Sharding + *out = make([]ShardingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseTargetParameters. +func (in *ClickhouseTargetParameters) DeepCopy() *ClickhouseTargetParameters { + if in == nil { + return nil + } + out := new(ClickhouseTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudLoggingParserInitParameters) DeepCopyInto(out *CloudLoggingParserInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoggingParserInitParameters. +func (in *CloudLoggingParserInitParameters) DeepCopy() *CloudLoggingParserInitParameters { + if in == nil { + return nil + } + out := new(CloudLoggingParserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudLoggingParserObservation) DeepCopyInto(out *CloudLoggingParserObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoggingParserObservation. +func (in *CloudLoggingParserObservation) DeepCopy() *CloudLoggingParserObservation { + if in == nil { + return nil + } + out := new(CloudLoggingParserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudLoggingParserParameters) DeepCopyInto(out *CloudLoggingParserParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoggingParserParameters. +func (in *CloudLoggingParserParameters) DeepCopy() *CloudLoggingParserParameters { + if in == nil { + return nil + } + out := new(CloudLoggingParserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionsInitParameters) DeepCopyInto(out *CollectionsInitParameters) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionsInitParameters. +func (in *CollectionsInitParameters) DeepCopy() *CollectionsInitParameters { + if in == nil { + return nil + } + out := new(CollectionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionsObservation) DeepCopyInto(out *CollectionsObservation) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionsObservation. +func (in *CollectionsObservation) DeepCopy() *CollectionsObservation { + if in == nil { + return nil + } + out := new(CollectionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionsParameters) DeepCopyInto(out *CollectionsParameters) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionsParameters. +func (in *CollectionsParameters) DeepCopy() *CollectionsParameters { + if in == nil { + return nil + } + out := new(CollectionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnValueHashInitParameters) DeepCopyInto(out *ColumnValueHashInitParameters) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnValueHashInitParameters. +func (in *ColumnValueHashInitParameters) DeepCopy() *ColumnValueHashInitParameters { + if in == nil { + return nil + } + out := new(ColumnValueHashInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColumnValueHashObservation) DeepCopyInto(out *ColumnValueHashObservation) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnValueHashObservation. +func (in *ColumnValueHashObservation) DeepCopy() *ColumnValueHashObservation { + if in == nil { + return nil + } + out := new(ColumnValueHashObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnValueHashParameters) DeepCopyInto(out *ColumnValueHashParameters) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnValueHashParameters. +func (in *ColumnValueHashParameters) DeepCopy() *ColumnValueHashParameters { + if in == nil { + return nil + } + out := new(ColumnValueHashParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnValueInitParameters) DeepCopyInto(out *ColumnValueInitParameters) { + *out = *in + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnValueInitParameters. +func (in *ColumnValueInitParameters) DeepCopy() *ColumnValueInitParameters { + if in == nil { + return nil + } + out := new(ColumnValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnValueObservation) DeepCopyInto(out *ColumnValueObservation) { + *out = *in + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnValueObservation. +func (in *ColumnValueObservation) DeepCopy() *ColumnValueObservation { + if in == nil { + return nil + } + out := new(ColumnValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnValueParameters) DeepCopyInto(out *ColumnValueParameters) { + *out = *in + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnValueParameters. +func (in *ColumnValueParameters) DeepCopy() *ColumnValueParameters { + if in == nil { + return nil + } + out := new(ColumnValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColumnsInitParameters) DeepCopyInto(out *ColumnsInitParameters) { + *out = *in + if in.ExcludeColumns != nil { + in, out := &in.ExcludeColumns, &out.ExcludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeColumns != nil { + in, out := &in.IncludeColumns, &out.IncludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnsInitParameters. +func (in *ColumnsInitParameters) DeepCopy() *ColumnsInitParameters { + if in == nil { + return nil + } + out := new(ColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnsObservation) DeepCopyInto(out *ColumnsObservation) { + *out = *in + if in.ExcludeColumns != nil { + in, out := &in.ExcludeColumns, &out.ExcludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeColumns != nil { + in, out := &in.IncludeColumns, &out.IncludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnsObservation. +func (in *ColumnsObservation) DeepCopy() *ColumnsObservation { + if in == nil { + return nil + } + out := new(ColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnsParameters) DeepCopyInto(out *ColumnsParameters) { + *out = *in + if in.ExcludeColumns != nil { + in, out := &in.ExcludeColumns, &out.ExcludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeColumns != nil { + in, out := &in.IncludeColumns, &out.IncludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnsParameters. +func (in *ColumnsParameters) DeepCopy() *ColumnsParameters { + if in == nil { + return nil + } + out := new(ColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionConnectionOptionsInitParameters) DeepCopyInto(out *ConnectionConnectionOptionsInitParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionOptionsOnPremiseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]ConnectionOptionsPasswordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsInitParameters. +func (in *ConnectionConnectionOptionsInitParameters) DeepCopy() *ConnectionConnectionOptionsInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsObservation) DeepCopyInto(out *ConnectionConnectionOptionsObservation) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionOptionsOnPremiseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]ConnectionOptionsPasswordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsObservation. +func (in *ConnectionConnectionOptionsObservation) DeepCopy() *ConnectionConnectionOptionsObservation { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionConnectionOptionsOnPremiseInitParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseInitParameters) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ReplicaSet != nil { + in, out := &in.ReplicaSet, &out.ReplicaSet + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]ConnectionOptionsOnPremiseTLSModeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseInitParameters. +func (in *ConnectionConnectionOptionsOnPremiseInitParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseObservation) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseObservation) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ReplicaSet != nil { + in, out := &in.ReplicaSet, &out.ReplicaSet + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]ConnectionOptionsOnPremiseTLSModeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseObservation. +func (in *ConnectionConnectionOptionsOnPremiseObservation) DeepCopy() *ConnectionConnectionOptionsOnPremiseObservation { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseParameters) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ReplicaSet != nil { + in, out := &in.ReplicaSet, &out.ReplicaSet + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]ConnectionOptionsOnPremiseTLSModeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseParameters. 
+func (in *ConnectionConnectionOptionsOnPremiseParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeDisabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeInitParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionConnectionOptionsOnPremiseTLSModeDisabledInitParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionConnectionOptionsOnPremiseTLSModeEnabledInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeInitParameters. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeInitParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeObservation) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionConnectionOptionsOnPremiseTLSModeEnabledObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeObservation. 
+func (in *ConnectionConnectionOptionsOnPremiseTLSModeObservation) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeObservation { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeParameters) DeepCopyInto(out *ConnectionConnectionOptionsOnPremiseTLSModeParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionConnectionOptionsOnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionConnectionOptionsOnPremiseTLSModeEnabledParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsOnPremiseTLSModeParameters. +func (in *ConnectionConnectionOptionsOnPremiseTLSModeParameters) DeepCopy() *ConnectionConnectionOptionsOnPremiseTLSModeParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsOnPremiseTLSModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsParameters) DeepCopyInto(out *ConnectionConnectionOptionsParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionOptionsOnPremiseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]ConnectionOptionsPasswordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsParameters. +func (in *ConnectionConnectionOptionsParameters) DeepCopy() *ConnectionConnectionOptionsParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsPasswordInitParameters) DeepCopyInto(out *ConnectionConnectionOptionsPasswordInitParameters) { + *out = *in + if in.RawSecretRef != nil { + in, out := &in.RawSecretRef, &out.RawSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsPasswordInitParameters. 
+func (in *ConnectionConnectionOptionsPasswordInitParameters) DeepCopy() *ConnectionConnectionOptionsPasswordInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsPasswordObservation) DeepCopyInto(out *ConnectionConnectionOptionsPasswordObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsPasswordObservation. +func (in *ConnectionConnectionOptionsPasswordObservation) DeepCopy() *ConnectionConnectionOptionsPasswordObservation { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConnectionOptionsPasswordParameters) DeepCopyInto(out *ConnectionConnectionOptionsPasswordParameters) { + *out = *in + if in.RawSecretRef != nil { + in, out := &in.RawSecretRef, &out.RawSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConnectionOptionsPasswordParameters. +func (in *ConnectionConnectionOptionsPasswordParameters) DeepCopy() *ConnectionConnectionOptionsPasswordParameters { + if in == nil { + return nil + } + out := new(ConnectionConnectionOptionsPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionInitParameters) DeepCopyInto(out *ConnectionInitParameters) { + *out = *in + if in.ConnectionOptions != nil { + in, out := &in.ConnectionOptions, &out.ConnectionOptions + *out = make([]ConnectionOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionInitParameters. +func (in *ConnectionInitParameters) DeepCopy() *ConnectionInitParameters { + if in == nil { + return nil + } + out := new(ConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionObservation) DeepCopyInto(out *ConnectionObservation) { + *out = *in + if in.ConnectionOptions != nil { + in, out := &in.ConnectionOptions, &out.ConnectionOptions + *out = make([]ConnectionOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionObservation. +func (in *ConnectionObservation) DeepCopy() *ConnectionObservation { + if in == nil { + return nil + } + out := new(ConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionOnPremiseInitParameters) DeepCopyInto(out *ConnectionOnPremiseInitParameters) { + *out = *in + if in.BrokerUrls != nil { + in, out := &in.BrokerUrls, &out.BrokerUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]ConnectionOnPremiseTLSModeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseInitParameters. +func (in *ConnectionOnPremiseInitParameters) DeepCopy() *ConnectionOnPremiseInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseObservation) DeepCopyInto(out *ConnectionOnPremiseObservation) { + *out = *in + if in.BrokerUrls != nil { + in, out := &in.BrokerUrls, &out.BrokerUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]ConnectionOnPremiseTLSModeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseObservation. +func (in *ConnectionOnPremiseObservation) DeepCopy() *ConnectionOnPremiseObservation { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseParameters) DeepCopyInto(out *ConnectionOnPremiseParameters) { + *out = *in + if in.BrokerUrls != nil { + in, out := &in.BrokerUrls, &out.BrokerUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]ConnectionOnPremiseTLSModeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseParameters. +func (in *ConnectionOnPremiseParameters) DeepCopy() *ConnectionOnPremiseParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *ConnectionOnPremiseTLSModeDisabledInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeDisabledInitParameters. +func (in *ConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopy() *ConnectionOnPremiseTLSModeDisabledInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeDisabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *ConnectionOnPremiseTLSModeDisabledObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeDisabledObservation. +func (in *ConnectionOnPremiseTLSModeDisabledObservation) DeepCopy() *ConnectionOnPremiseTLSModeDisabledObservation { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeDisabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *ConnectionOnPremiseTLSModeDisabledParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeDisabledParameters. +func (in *ConnectionOnPremiseTLSModeDisabledParameters) DeepCopy() *ConnectionOnPremiseTLSModeDisabledParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeDisabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *ConnectionOnPremiseTLSModeEnabledInitParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeEnabledInitParameters. +func (in *ConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopy() *ConnectionOnPremiseTLSModeEnabledInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeEnabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *ConnectionOnPremiseTLSModeEnabledObservation) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeEnabledObservation. +func (in *ConnectionOnPremiseTLSModeEnabledObservation) DeepCopy() *ConnectionOnPremiseTLSModeEnabledObservation { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeEnabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *ConnectionOnPremiseTLSModeEnabledParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeEnabledParameters. +func (in *ConnectionOnPremiseTLSModeEnabledParameters) DeepCopy() *ConnectionOnPremiseTLSModeEnabledParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeEnabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeInitParameters) DeepCopyInto(out *ConnectionOnPremiseTLSModeInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]OnPremiseTLSModeDisabledInitParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]OnPremiseTLSModeEnabledInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeInitParameters. +func (in *ConnectionOnPremiseTLSModeInitParameters) DeepCopy() *ConnectionOnPremiseTLSModeInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeObservation) DeepCopyInto(out *ConnectionOnPremiseTLSModeObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]OnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]OnPremiseTLSModeEnabledObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeObservation. +func (in *ConnectionOnPremiseTLSModeObservation) DeepCopy() *ConnectionOnPremiseTLSModeObservation { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOnPremiseTLSModeParameters) DeepCopyInto(out *ConnectionOnPremiseTLSModeParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]OnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]OnPremiseTLSModeEnabledParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOnPremiseTLSModeParameters. 
+func (in *ConnectionOnPremiseTLSModeParameters) DeepCopy() *ConnectionOnPremiseTLSModeParameters { + if in == nil { + return nil + } + out := new(ConnectionOnPremiseTLSModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsInitParameters) DeepCopyInto(out *ConnectionOptionsInitParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]OnPremiseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]PasswordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsInitParameters. +func (in *ConnectionOptionsInitParameters) DeepCopy() *ConnectionOptionsInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsObservation) DeepCopyInto(out *ConnectionOptionsObservation) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]OnPremiseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]PasswordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsObservation. +func (in *ConnectionOptionsObservation) DeepCopy() *ConnectionOptionsObservation { + if in == nil { + return nil + } + out := new(ConnectionOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionOptionsOnPremiseInitParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseInitParameters) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.NativePort != nil { + in, out := &in.NativePort, &out.NativePort + *out = new(float64) + **out = **in + } + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = make([]OnPremiseShardsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]OnPremiseTLSModeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseInitParameters. +func (in *ConnectionOptionsOnPremiseInitParameters) DeepCopy() *ConnectionOptionsOnPremiseInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseObservation) DeepCopyInto(out *ConnectionOptionsOnPremiseObservation) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.NativePort != nil { + in, out := &in.NativePort, &out.NativePort + *out = new(float64) + **out = **in + } + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = make([]OnPremiseShardsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]OnPremiseTLSModeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseObservation. +func (in *ConnectionOptionsOnPremiseObservation) DeepCopy() *ConnectionOptionsOnPremiseObservation { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseParameters) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.NativePort != nil { + in, out := &in.NativePort, &out.NativePort + *out = new(float64) + **out = **in + } + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = make([]OnPremiseShardsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]OnPremiseTLSModeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseParameters. 
+func (in *ConnectionOptionsOnPremiseParameters) DeepCopy() *ConnectionOptionsOnPremiseParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeDisabledInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeDisabledInitParameters. +func (in *ConnectionOptionsOnPremiseTLSModeDisabledInitParameters) DeepCopy() *ConnectionOptionsOnPremiseTLSModeDisabledInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeDisabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeDisabledObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeDisabledObservation. +func (in *ConnectionOptionsOnPremiseTLSModeDisabledObservation) DeepCopy() *ConnectionOptionsOnPremiseTLSModeDisabledObservation { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeDisabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeDisabledParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeDisabledParameters. +func (in *ConnectionOptionsOnPremiseTLSModeDisabledParameters) DeepCopy() *ConnectionOptionsOnPremiseTLSModeDisabledParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeDisabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeEnabledInitParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeEnabledInitParameters. +func (in *ConnectionOptionsOnPremiseTLSModeEnabledInitParameters) DeepCopy() *ConnectionOptionsOnPremiseTLSModeEnabledInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeEnabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionOptionsOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeEnabledObservation) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeEnabledObservation. +func (in *ConnectionOptionsOnPremiseTLSModeEnabledObservation) DeepCopy() *ConnectionOptionsOnPremiseTLSModeEnabledObservation { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeEnabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeEnabledParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeEnabledParameters. +func (in *ConnectionOptionsOnPremiseTLSModeEnabledParameters) DeepCopy() *ConnectionOptionsOnPremiseTLSModeEnabledParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeEnabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeInitParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionOptionsOnPremiseTLSModeDisabledInitParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionOptionsOnPremiseTLSModeEnabledInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeInitParameters. +func (in *ConnectionOptionsOnPremiseTLSModeInitParameters) DeepCopy() *ConnectionOptionsOnPremiseTLSModeInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeObservation) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionOptionsOnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionOptionsOnPremiseTLSModeEnabledObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeObservation. 
+func (in *ConnectionOptionsOnPremiseTLSModeObservation) DeepCopy() *ConnectionOptionsOnPremiseTLSModeObservation { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsOnPremiseTLSModeParameters) DeepCopyInto(out *ConnectionOptionsOnPremiseTLSModeParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionOptionsOnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionOptionsOnPremiseTLSModeEnabledParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsOnPremiseTLSModeParameters. +func (in *ConnectionOptionsOnPremiseTLSModeParameters) DeepCopy() *ConnectionOptionsOnPremiseTLSModeParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsOnPremiseTLSModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsParameters) DeepCopyInto(out *ConnectionOptionsParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]OnPremiseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]PasswordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsParameters. +func (in *ConnectionOptionsParameters) DeepCopy() *ConnectionOptionsParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsPasswordInitParameters) DeepCopyInto(out *ConnectionOptionsPasswordInitParameters) { + *out = *in + if in.RawSecretRef != nil { + in, out := &in.RawSecretRef, &out.RawSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsPasswordInitParameters. +func (in *ConnectionOptionsPasswordInitParameters) DeepCopy() *ConnectionOptionsPasswordInitParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionOptionsPasswordObservation) DeepCopyInto(out *ConnectionOptionsPasswordObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsPasswordObservation. +func (in *ConnectionOptionsPasswordObservation) DeepCopy() *ConnectionOptionsPasswordObservation { + if in == nil { + return nil + } + out := new(ConnectionOptionsPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionOptionsPasswordParameters) DeepCopyInto(out *ConnectionOptionsPasswordParameters) { + *out = *in + if in.RawSecretRef != nil { + in, out := &in.RawSecretRef, &out.RawSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionOptionsPasswordParameters. +func (in *ConnectionOptionsPasswordParameters) DeepCopy() *ConnectionOptionsPasswordParameters { + if in == nil { + return nil + } + out := new(ConnectionOptionsPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionParameters) DeepCopyInto(out *ConnectionParameters) { + *out = *in + if in.ConnectionOptions != nil { + in, out := &in.ConnectionOptions, &out.ConnectionOptions + *out = make([]ConnectionOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionParameters. +func (in *ConnectionParameters) DeepCopy() *ConnectionParameters { + if in == nil { + return nil + } + out := new(ConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConvertToStringInitParameters) DeepCopyInto(out *ConvertToStringInitParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]ColumnsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]TablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConvertToStringInitParameters. +func (in *ConvertToStringInitParameters) DeepCopy() *ConvertToStringInitParameters { + if in == nil { + return nil + } + out := new(ConvertToStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConvertToStringObservation) DeepCopyInto(out *ConvertToStringObservation) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]ColumnsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]TablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConvertToStringObservation. 
+func (in *ConvertToStringObservation) DeepCopy() *ConvertToStringObservation { + if in == nil { + return nil + } + out := new(ConvertToStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConvertToStringParameters) DeepCopyInto(out *ConvertToStringParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]ColumnsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]TablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConvertToStringParameters. +func (in *ConvertToStringParameters) DeepCopy() *ConvertToStringParameters { + if in == nil { + return nil + } + out := new(ConvertToStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMappingInitParameters) DeepCopyInto(out *CustomMappingInitParameters) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = make([]MappingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMappingInitParameters. +func (in *CustomMappingInitParameters) DeepCopy() *CustomMappingInitParameters { + if in == nil { + return nil + } + out := new(CustomMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMappingObservation) DeepCopyInto(out *CustomMappingObservation) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = make([]MappingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMappingObservation. +func (in *CustomMappingObservation) DeepCopy() *CustomMappingObservation { + if in == nil { + return nil + } + out := new(CustomMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMappingParameters) DeepCopyInto(out *CustomMappingParameters) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = make([]MappingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMappingParameters. 
+func (in *CustomMappingParameters) DeepCopy() *CustomMappingParameters { + if in == nil { + return nil + } + out := new(CustomMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaFieldsFieldsInitParameters) DeepCopyInto(out *DataSchemaFieldsFieldsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaFieldsFieldsInitParameters. +func (in *DataSchemaFieldsFieldsInitParameters) DeepCopy() *DataSchemaFieldsFieldsInitParameters { + if in == nil { + return nil + } + out := new(DataSchemaFieldsFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaFieldsFieldsObservation) DeepCopyInto(out *DataSchemaFieldsFieldsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaFieldsFieldsObservation. +func (in *DataSchemaFieldsFieldsObservation) DeepCopy() *DataSchemaFieldsFieldsObservation { + if in == nil { + return nil + } + out := new(DataSchemaFieldsFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaFieldsFieldsParameters) DeepCopyInto(out *DataSchemaFieldsFieldsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaFieldsFieldsParameters. +func (in *DataSchemaFieldsFieldsParameters) DeepCopy() *DataSchemaFieldsFieldsParameters { + if in == nil { + return nil + } + out := new(DataSchemaFieldsFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSchemaFieldsInitParameters) DeepCopyInto(out *DataSchemaFieldsInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]DataSchemaFieldsFieldsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaFieldsInitParameters. +func (in *DataSchemaFieldsInitParameters) DeepCopy() *DataSchemaFieldsInitParameters { + if in == nil { + return nil + } + out := new(DataSchemaFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaFieldsObservation) DeepCopyInto(out *DataSchemaFieldsObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]DataSchemaFieldsFieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaFieldsObservation. +func (in *DataSchemaFieldsObservation) DeepCopy() *DataSchemaFieldsObservation { + if in == nil { + return nil + } + out := new(DataSchemaFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaFieldsParameters) DeepCopyInto(out *DataSchemaFieldsParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]DataSchemaFieldsFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaFieldsParameters. +func (in *DataSchemaFieldsParameters) DeepCopy() *DataSchemaFieldsParameters { + if in == nil { + return nil + } + out := new(DataSchemaFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaInitParameters) DeepCopyInto(out *DataSchemaInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]FieldsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaInitParameters. +func (in *DataSchemaInitParameters) DeepCopy() *DataSchemaInitParameters { + if in == nil { + return nil + } + out := new(DataSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaObservation) DeepCopyInto(out *DataSchemaObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]FieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaObservation. 
+func (in *DataSchemaObservation) DeepCopy() *DataSchemaObservation { + if in == nil { + return nil + } + out := new(DataSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSchemaParameters) DeepCopyInto(out *DataSchemaParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]FieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSchemaParameters. +func (in *DataSchemaParameters) DeepCopy() *DataSchemaParameters { + if in == nil { + return nil + } + out := new(DataSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DisabledInitParameters) DeepCopyInto(out *DisabledInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledInitParameters. +func (in *DisabledInitParameters) DeepCopy() *DisabledInitParameters { + if in == nil { + return nil + } + out := new(DisabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DisabledObservation) DeepCopyInto(out *DisabledObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledObservation. +func (in *DisabledObservation) DeepCopy() *DisabledObservation { + if in == nil { + return nil + } + out := new(DisabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DisabledParameters) DeepCopyInto(out *DisabledParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledParameters. +func (in *DisabledParameters) DeepCopy() *DisabledParameters { + if in == nil { + return nil + } + out := new(DisabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledInitParameters) DeepCopyInto(out *EnabledInitParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledInitParameters. +func (in *EnabledInitParameters) DeepCopy() *EnabledInitParameters { + if in == nil { + return nil + } + out := new(EnabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledObservation) DeepCopyInto(out *EnabledObservation) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledObservation. 
+func (in *EnabledObservation) DeepCopy() *EnabledObservation { + if in == nil { + return nil + } + out := new(EnabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledParameters) DeepCopyInto(out *EnabledParameters) { + *out = *in + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledParameters. +func (in *EnabledParameters) DeepCopy() *EnabledParameters { + if in == nil { + return nil + } + out := new(EnabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointList) DeepCopyInto(out *EndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList. +func (in *EndpointList) DeepCopy() *EndpointList { + if in == nil { + return nil + } + out := new(EndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSpec) DeepCopyInto(out *EndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSpec. +func (in *EndpointSpec) DeepCopy() *EndpointSpec { + if in == nil { + return nil + } + out := new(EndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus. +func (in *EndpointStatus) DeepCopy() *EndpointStatus { + if in == nil { + return nil + } + out := new(EndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedCollectionsInitParameters) DeepCopyInto(out *ExcludedCollectionsInitParameters) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedCollectionsInitParameters. +func (in *ExcludedCollectionsInitParameters) DeepCopy() *ExcludedCollectionsInitParameters { + if in == nil { + return nil + } + out := new(ExcludedCollectionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ExcludedCollectionsObservation) DeepCopyInto(out *ExcludedCollectionsObservation) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedCollectionsObservation. +func (in *ExcludedCollectionsObservation) DeepCopy() *ExcludedCollectionsObservation { + if in == nil { + return nil + } + out := new(ExcludedCollectionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedCollectionsParameters) DeepCopyInto(out *ExcludedCollectionsParameters) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedCollectionsParameters. +func (in *ExcludedCollectionsParameters) DeepCopy() *ExcludedCollectionsParameters { + if in == nil { + return nil + } + out := new(ExcludedCollectionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsFieldsInitParameters) DeepCopyInto(out *FieldsFieldsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsFieldsInitParameters. +func (in *FieldsFieldsInitParameters) DeepCopy() *FieldsFieldsInitParameters { + if in == nil { + return nil + } + out := new(FieldsFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsFieldsObservation) DeepCopyInto(out *FieldsFieldsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsFieldsObservation. 
+func (in *FieldsFieldsObservation) DeepCopy() *FieldsFieldsObservation { + if in == nil { + return nil + } + out := new(FieldsFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsFieldsParameters) DeepCopyInto(out *FieldsFieldsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsFieldsParameters. +func (in *FieldsFieldsParameters) DeepCopy() *FieldsFieldsParameters { + if in == nil { + return nil + } + out := new(FieldsFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsInitParameters) DeepCopyInto(out *FieldsInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]FieldsFieldsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsInitParameters. +func (in *FieldsInitParameters) DeepCopy() *FieldsInitParameters { + if in == nil { + return nil + } + out := new(FieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsObservation) DeepCopyInto(out *FieldsObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]FieldsFieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsObservation. +func (in *FieldsObservation) DeepCopy() *FieldsObservation { + if in == nil { + return nil + } + out := new(FieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsParameters) DeepCopyInto(out *FieldsParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]FieldsFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsParameters. +func (in *FieldsParameters) DeepCopy() *FieldsParameters { + if in == nil { + return nil + } + out := new(FieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterColumnsColumnsInitParameters) DeepCopyInto(out *FilterColumnsColumnsInitParameters) { + *out = *in + if in.ExcludeColumns != nil { + in, out := &in.ExcludeColumns, &out.ExcludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeColumns != nil { + in, out := &in.IncludeColumns, &out.IncludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsColumnsInitParameters. +func (in *FilterColumnsColumnsInitParameters) DeepCopy() *FilterColumnsColumnsInitParameters { + if in == nil { + return nil + } + out := new(FilterColumnsColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsColumnsObservation) DeepCopyInto(out *FilterColumnsColumnsObservation) { + *out = *in + if in.ExcludeColumns != nil { + in, out := &in.ExcludeColumns, &out.ExcludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeColumns != nil { + in, out := &in.IncludeColumns, &out.IncludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsColumnsObservation. +func (in *FilterColumnsColumnsObservation) DeepCopy() *FilterColumnsColumnsObservation { + if in == nil { + return nil + } + out := new(FilterColumnsColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsColumnsParameters) DeepCopyInto(out *FilterColumnsColumnsParameters) { + *out = *in + if in.ExcludeColumns != nil { + in, out := &in.ExcludeColumns, &out.ExcludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeColumns != nil { + in, out := &in.IncludeColumns, &out.IncludeColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsColumnsParameters. +func (in *FilterColumnsColumnsParameters) DeepCopy() *FilterColumnsColumnsParameters { + if in == nil { + return nil + } + out := new(FilterColumnsColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterColumnsInitParameters) DeepCopyInto(out *FilterColumnsInitParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]FilterColumnsColumnsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]FilterColumnsTablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsInitParameters. +func (in *FilterColumnsInitParameters) DeepCopy() *FilterColumnsInitParameters { + if in == nil { + return nil + } + out := new(FilterColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsObservation) DeepCopyInto(out *FilterColumnsObservation) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]FilterColumnsColumnsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]FilterColumnsTablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsObservation. +func (in *FilterColumnsObservation) DeepCopy() *FilterColumnsObservation { + if in == nil { + return nil + } + out := new(FilterColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsParameters) DeepCopyInto(out *FilterColumnsParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]FilterColumnsColumnsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]FilterColumnsTablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsParameters. +func (in *FilterColumnsParameters) DeepCopy() *FilterColumnsParameters { + if in == nil { + return nil + } + out := new(FilterColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsTablesInitParameters) DeepCopyInto(out *FilterColumnsTablesInitParameters) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsTablesInitParameters. 
+func (in *FilterColumnsTablesInitParameters) DeepCopy() *FilterColumnsTablesInitParameters { + if in == nil { + return nil + } + out := new(FilterColumnsTablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsTablesObservation) DeepCopyInto(out *FilterColumnsTablesObservation) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsTablesObservation. +func (in *FilterColumnsTablesObservation) DeepCopy() *FilterColumnsTablesObservation { + if in == nil { + return nil + } + out := new(FilterColumnsTablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterColumnsTablesParameters) DeepCopyInto(out *FilterColumnsTablesParameters) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterColumnsTablesParameters. +func (in *FilterColumnsTablesParameters) DeepCopy() *FilterColumnsTablesParameters { + if in == nil { + return nil + } + out := new(FilterColumnsTablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterRowsInitParameters) DeepCopyInto(out *FilterRowsInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]FilterRowsTablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRowsInitParameters. +func (in *FilterRowsInitParameters) DeepCopy() *FilterRowsInitParameters { + if in == nil { + return nil + } + out := new(FilterRowsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterRowsObservation) DeepCopyInto(out *FilterRowsObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]FilterRowsTablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRowsObservation. +func (in *FilterRowsObservation) DeepCopy() *FilterRowsObservation { + if in == nil { + return nil + } + out := new(FilterRowsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterRowsParameters) DeepCopyInto(out *FilterRowsParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]FilterRowsTablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRowsParameters. +func (in *FilterRowsParameters) DeepCopy() *FilterRowsParameters { + if in == nil { + return nil + } + out := new(FilterRowsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterRowsTablesInitParameters) DeepCopyInto(out *FilterRowsTablesInitParameters) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRowsTablesInitParameters. +func (in *FilterRowsTablesInitParameters) DeepCopy() *FilterRowsTablesInitParameters { + if in == nil { + return nil + } + out := new(FilterRowsTablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterRowsTablesObservation) DeepCopyInto(out *FilterRowsTablesObservation) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRowsTablesObservation. 
+func (in *FilterRowsTablesObservation) DeepCopy() *FilterRowsTablesObservation { + if in == nil { + return nil + } + out := new(FilterRowsTablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterRowsTablesParameters) DeepCopyInto(out *FilterRowsTablesParameters) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRowsTablesParameters. +func (in *FilterRowsTablesParameters) DeepCopy() *FilterRowsTablesParameters { + if in == nil { + return nil + } + out := new(FilterRowsTablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.MaskFunctionHash != nil { + in, out := &in.MaskFunctionHash, &out.MaskFunctionHash + *out = make([]MaskFunctionHashInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. +func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.MaskFunctionHash != nil { + in, out := &in.MaskFunctionHash, &out.MaskFunctionHash + *out = make([]MaskFunctionHashObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionObservation. +func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.MaskFunctionHash != nil { + in, out := &in.MaskFunctionHash, &out.MaskFunctionHash + *out = make([]MaskFunctionHashParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONParserDataSchemaFieldsFieldsInitParameters) DeepCopyInto(out *JSONParserDataSchemaFieldsFieldsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaFieldsFieldsInitParameters. +func (in *JSONParserDataSchemaFieldsFieldsInitParameters) DeepCopy() *JSONParserDataSchemaFieldsFieldsInitParameters { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaFieldsFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserDataSchemaFieldsFieldsObservation) DeepCopyInto(out *JSONParserDataSchemaFieldsFieldsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaFieldsFieldsObservation. +func (in *JSONParserDataSchemaFieldsFieldsObservation) DeepCopy() *JSONParserDataSchemaFieldsFieldsObservation { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaFieldsFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserDataSchemaFieldsFieldsParameters) DeepCopyInto(out *JSONParserDataSchemaFieldsFieldsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaFieldsFieldsParameters. +func (in *JSONParserDataSchemaFieldsFieldsParameters) DeepCopy() *JSONParserDataSchemaFieldsFieldsParameters { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaFieldsFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONParserDataSchemaFieldsInitParameters) DeepCopyInto(out *JSONParserDataSchemaFieldsInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]JSONParserDataSchemaFieldsFieldsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaFieldsInitParameters. +func (in *JSONParserDataSchemaFieldsInitParameters) DeepCopy() *JSONParserDataSchemaFieldsInitParameters { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserDataSchemaFieldsObservation) DeepCopyInto(out *JSONParserDataSchemaFieldsObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]JSONParserDataSchemaFieldsFieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaFieldsObservation. +func (in *JSONParserDataSchemaFieldsObservation) DeepCopy() *JSONParserDataSchemaFieldsObservation { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserDataSchemaFieldsParameters) DeepCopyInto(out *JSONParserDataSchemaFieldsParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]JSONParserDataSchemaFieldsFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaFieldsParameters. +func (in *JSONParserDataSchemaFieldsParameters) DeepCopy() *JSONParserDataSchemaFieldsParameters { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserDataSchemaInitParameters) DeepCopyInto(out *JSONParserDataSchemaInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]JSONParserDataSchemaFieldsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaInitParameters. +func (in *JSONParserDataSchemaInitParameters) DeepCopy() *JSONParserDataSchemaInitParameters { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONParserDataSchemaObservation) DeepCopyInto(out *JSONParserDataSchemaObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]JSONParserDataSchemaFieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaObservation. +func (in *JSONParserDataSchemaObservation) DeepCopy() *JSONParserDataSchemaObservation { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserDataSchemaParameters) DeepCopyInto(out *JSONParserDataSchemaParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]JSONParserDataSchemaFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserDataSchemaParameters. +func (in *JSONParserDataSchemaParameters) DeepCopy() *JSONParserDataSchemaParameters { + if in == nil { + return nil + } + out := new(JSONParserDataSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserInitParameters) DeepCopyInto(out *JSONParserInitParameters) { + *out = *in + if in.AddRestColumn != nil { + in, out := &in.AddRestColumn, &out.AddRestColumn + *out = new(bool) + **out = **in + } + if in.DataSchema != nil { + in, out := &in.DataSchema, &out.DataSchema + *out = make([]DataSchemaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NullKeysAllowed != nil { + in, out := &in.NullKeysAllowed, &out.NullKeysAllowed + *out = new(bool) + **out = **in + } + if in.UnescapeStringValues != nil { + in, out := &in.UnescapeStringValues, &out.UnescapeStringValues + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserInitParameters. +func (in *JSONParserInitParameters) DeepCopy() *JSONParserInitParameters { + if in == nil { + return nil + } + out := new(JSONParserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONParserObservation) DeepCopyInto(out *JSONParserObservation) { + *out = *in + if in.AddRestColumn != nil { + in, out := &in.AddRestColumn, &out.AddRestColumn + *out = new(bool) + **out = **in + } + if in.DataSchema != nil { + in, out := &in.DataSchema, &out.DataSchema + *out = make([]DataSchemaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NullKeysAllowed != nil { + in, out := &in.NullKeysAllowed, &out.NullKeysAllowed + *out = new(bool) + **out = **in + } + if in.UnescapeStringValues != nil { + in, out := &in.UnescapeStringValues, &out.UnescapeStringValues + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserObservation. +func (in *JSONParserObservation) DeepCopy() *JSONParserObservation { + if in == nil { + return nil + } + out := new(JSONParserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParserParameters) DeepCopyInto(out *JSONParserParameters) { + *out = *in + if in.AddRestColumn != nil { + in, out := &in.AddRestColumn, &out.AddRestColumn + *out = new(bool) + **out = **in + } + if in.DataSchema != nil { + in, out := &in.DataSchema, &out.DataSchema + *out = make([]DataSchemaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NullKeysAllowed != nil { + in, out := &in.NullKeysAllowed, &out.NullKeysAllowed + *out = new(bool) + **out = **in + } + if in.UnescapeStringValues != nil { + in, out := &in.UnescapeStringValues, &out.UnescapeStringValues + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParserParameters. +func (in *JSONParserParameters) DeepCopy() *JSONParserParameters { + if in == nil { + return nil + } + out := new(JSONParserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSourceConnectionInitParameters) DeepCopyInto(out *KafkaSourceConnectionInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionOnPremiseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceConnectionInitParameters. +func (in *KafkaSourceConnectionInitParameters) DeepCopy() *KafkaSourceConnectionInitParameters { + if in == nil { + return nil + } + out := new(KafkaSourceConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaSourceConnectionObservation) DeepCopyInto(out *KafkaSourceConnectionObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionOnPremiseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceConnectionObservation. +func (in *KafkaSourceConnectionObservation) DeepCopy() *KafkaSourceConnectionObservation { + if in == nil { + return nil + } + out := new(KafkaSourceConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSourceConnectionParameters) DeepCopyInto(out *KafkaSourceConnectionParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionOnPremiseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceConnectionParameters. +func (in *KafkaSourceConnectionParameters) DeepCopy() *KafkaSourceConnectionParameters { + if in == nil { + return nil + } + out := new(KafkaSourceConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSourceInitParameters) DeepCopyInto(out *KafkaSourceInitParameters) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = make([]AuthInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]KafkaSourceConnectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = make([]ParserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } + if in.TopicNames != nil { + in, out := &in.TopicNames, &out.TopicNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Transformer != nil { + in, out := &in.Transformer, &out.Transformer + *out = make([]TransformerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceInitParameters. 
+func (in *KafkaSourceInitParameters) DeepCopy() *KafkaSourceInitParameters { + if in == nil { + return nil + } + out := new(KafkaSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSourceObservation) DeepCopyInto(out *KafkaSourceObservation) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = make([]AuthObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]KafkaSourceConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = make([]ParserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } + if in.TopicNames != nil { + in, out := &in.TopicNames, &out.TopicNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Transformer != nil { + in, out := &in.Transformer, &out.Transformer + *out = make([]TransformerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceObservation. +func (in *KafkaSourceObservation) DeepCopy() *KafkaSourceObservation { + if in == nil { + return nil + } + out := new(KafkaSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaSourceParameters) DeepCopyInto(out *KafkaSourceParameters) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = make([]AuthParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]KafkaSourceConnectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = make([]ParserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } + if in.TopicNames != nil { + in, out := &in.TopicNames, &out.TopicNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Transformer != nil { + in, out := &in.Transformer, &out.Transformer + *out = make([]TransformerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceParameters. +func (in *KafkaSourceParameters) DeepCopy() *KafkaSourceParameters { + if in == nil { + return nil + } + out := new(KafkaSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetAuthInitParameters) DeepCopyInto(out *KafkaTargetAuthInitParameters) { + *out = *in + if in.NoAuth != nil { + in, out := &in.NoAuth, &out.NoAuth + *out = make([]AuthNoAuthInitParameters, len(*in)) + copy(*out, *in) + } + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = make([]AuthSaslInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetAuthInitParameters. +func (in *KafkaTargetAuthInitParameters) DeepCopy() *KafkaTargetAuthInitParameters { + if in == nil { + return nil + } + out := new(KafkaTargetAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetAuthObservation) DeepCopyInto(out *KafkaTargetAuthObservation) { + *out = *in + if in.NoAuth != nil { + in, out := &in.NoAuth, &out.NoAuth + *out = make([]AuthNoAuthParameters, len(*in)) + copy(*out, *in) + } + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = make([]AuthSaslObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetAuthObservation. +func (in *KafkaTargetAuthObservation) DeepCopy() *KafkaTargetAuthObservation { + if in == nil { + return nil + } + out := new(KafkaTargetAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaTargetAuthParameters) DeepCopyInto(out *KafkaTargetAuthParameters) { + *out = *in + if in.NoAuth != nil { + in, out := &in.NoAuth, &out.NoAuth + *out = make([]AuthNoAuthParameters, len(*in)) + copy(*out, *in) + } + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = make([]AuthSaslParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetAuthParameters. +func (in *KafkaTargetAuthParameters) DeepCopy() *KafkaTargetAuthParameters { + if in == nil { + return nil + } + out := new(KafkaTargetAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionInitParameters) DeepCopyInto(out *KafkaTargetConnectionInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]KafkaTargetConnectionOnPremiseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionInitParameters. +func (in *KafkaTargetConnectionInitParameters) DeepCopy() *KafkaTargetConnectionInitParameters { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionObservation) DeepCopyInto(out *KafkaTargetConnectionObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]KafkaTargetConnectionOnPremiseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionObservation. +func (in *KafkaTargetConnectionObservation) DeepCopy() *KafkaTargetConnectionObservation { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionOnPremiseInitParameters) DeepCopyInto(out *KafkaTargetConnectionOnPremiseInitParameters) { + *out = *in + if in.BrokerUrls != nil { + in, out := &in.BrokerUrls, &out.BrokerUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]KafkaTargetConnectionOnPremiseTLSModeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionOnPremiseInitParameters. 
+func (in *KafkaTargetConnectionOnPremiseInitParameters) DeepCopy() *KafkaTargetConnectionOnPremiseInitParameters { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionOnPremiseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionOnPremiseObservation) DeepCopyInto(out *KafkaTargetConnectionOnPremiseObservation) { + *out = *in + if in.BrokerUrls != nil { + in, out := &in.BrokerUrls, &out.BrokerUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]KafkaTargetConnectionOnPremiseTLSModeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionOnPremiseObservation. +func (in *KafkaTargetConnectionOnPremiseObservation) DeepCopy() *KafkaTargetConnectionOnPremiseObservation { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionOnPremiseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionOnPremiseParameters) DeepCopyInto(out *KafkaTargetConnectionOnPremiseParameters) { + *out = *in + if in.BrokerUrls != nil { + in, out := &in.BrokerUrls, &out.BrokerUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TLSMode != nil { + in, out := &in.TLSMode, &out.TLSMode + *out = make([]KafkaTargetConnectionOnPremiseTLSModeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionOnPremiseParameters. +func (in *KafkaTargetConnectionOnPremiseParameters) DeepCopy() *KafkaTargetConnectionOnPremiseParameters { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionOnPremiseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionOnPremiseTLSModeInitParameters) DeepCopyInto(out *KafkaTargetConnectionOnPremiseTLSModeInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionOnPremiseTLSModeDisabledInitParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionOnPremiseTLSModeEnabledInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionOnPremiseTLSModeInitParameters. 
+func (in *KafkaTargetConnectionOnPremiseTLSModeInitParameters) DeepCopy() *KafkaTargetConnectionOnPremiseTLSModeInitParameters { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionOnPremiseTLSModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionOnPremiseTLSModeObservation) DeepCopyInto(out *KafkaTargetConnectionOnPremiseTLSModeObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionOnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionOnPremiseTLSModeEnabledObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionOnPremiseTLSModeObservation. +func (in *KafkaTargetConnectionOnPremiseTLSModeObservation) DeepCopy() *KafkaTargetConnectionOnPremiseTLSModeObservation { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionOnPremiseTLSModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionOnPremiseTLSModeParameters) DeepCopyInto(out *KafkaTargetConnectionOnPremiseTLSModeParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]ConnectionOnPremiseTLSModeDisabledParameters, len(*in)) + copy(*out, *in) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]ConnectionOnPremiseTLSModeEnabledParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionOnPremiseTLSModeParameters. +func (in *KafkaTargetConnectionOnPremiseTLSModeParameters) DeepCopy() *KafkaTargetConnectionOnPremiseTLSModeParameters { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionOnPremiseTLSModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetConnectionParameters) DeepCopyInto(out *KafkaTargetConnectionParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]KafkaTargetConnectionOnPremiseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetConnectionParameters. +func (in *KafkaTargetConnectionParameters) DeepCopy() *KafkaTargetConnectionParameters { + if in == nil { + return nil + } + out := new(KafkaTargetConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaTargetInitParameters) DeepCopyInto(out *KafkaTargetInitParameters) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = make([]KafkaTargetAuthInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]KafkaTargetConnectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = make([]SerializerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopicSettings != nil { + in, out := &in.TopicSettings, &out.TopicSettings + *out = make([]TopicSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetInitParameters. +func (in *KafkaTargetInitParameters) DeepCopy() *KafkaTargetInitParameters { + if in == nil { + return nil + } + out := new(KafkaTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTargetObservation) DeepCopyInto(out *KafkaTargetObservation) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = make([]KafkaTargetAuthObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]KafkaTargetConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = make([]SerializerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopicSettings != nil { + in, out := &in.TopicSettings, &out.TopicSettings + *out = make([]TopicSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetObservation. +func (in *KafkaTargetObservation) DeepCopy() *KafkaTargetObservation { + if in == nil { + return nil + } + out := new(KafkaTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaTargetParameters) DeepCopyInto(out *KafkaTargetParameters) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = make([]KafkaTargetAuthParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = make([]KafkaTargetConnectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = make([]SerializerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopicSettings != nil { + in, out := &in.TopicSettings, &out.TopicSettings + *out = make([]TopicSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTargetParameters. +func (in *KafkaTargetParameters) DeepCopy() *KafkaTargetParameters { + if in == nil { + return nil + } + out := new(KafkaTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingInitParameters) DeepCopyInto(out *MappingInitParameters) { + *out = *in + if in.ColumnValue != nil { + in, out := &in.ColumnValue, &out.ColumnValue + *out = make([]ColumnValueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardName != nil { + in, out := &in.ShardName, &out.ShardName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingInitParameters. +func (in *MappingInitParameters) DeepCopy() *MappingInitParameters { + if in == nil { + return nil + } + out := new(MappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingObservation) DeepCopyInto(out *MappingObservation) { + *out = *in + if in.ColumnValue != nil { + in, out := &in.ColumnValue, &out.ColumnValue + *out = make([]ColumnValueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardName != nil { + in, out := &in.ShardName, &out.ShardName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingObservation. +func (in *MappingObservation) DeepCopy() *MappingObservation { + if in == nil { + return nil + } + out := new(MappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParameters) DeepCopyInto(out *MappingParameters) { + *out = *in + if in.ColumnValue != nil { + in, out := &in.ColumnValue, &out.ColumnValue + *out = make([]ColumnValueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardName != nil { + in, out := &in.ShardName, &out.ShardName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParameters. +func (in *MappingParameters) DeepCopy() *MappingParameters { + if in == nil { + return nil + } + out := new(MappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFieldInitParameters) DeepCopyInto(out *MaskFieldInitParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]MaskFieldTablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFieldInitParameters. +func (in *MaskFieldInitParameters) DeepCopy() *MaskFieldInitParameters { + if in == nil { + return nil + } + out := new(MaskFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFieldObservation) DeepCopyInto(out *MaskFieldObservation) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]MaskFieldTablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFieldObservation. +func (in *MaskFieldObservation) DeepCopy() *MaskFieldObservation { + if in == nil { + return nil + } + out := new(MaskFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaskFieldParameters) DeepCopyInto(out *MaskFieldParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]MaskFieldTablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFieldParameters. +func (in *MaskFieldParameters) DeepCopy() *MaskFieldParameters { + if in == nil { + return nil + } + out := new(MaskFieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFieldTablesInitParameters) DeepCopyInto(out *MaskFieldTablesInitParameters) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFieldTablesInitParameters. +func (in *MaskFieldTablesInitParameters) DeepCopy() *MaskFieldTablesInitParameters { + if in == nil { + return nil + } + out := new(MaskFieldTablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFieldTablesObservation) DeepCopyInto(out *MaskFieldTablesObservation) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFieldTablesObservation. +func (in *MaskFieldTablesObservation) DeepCopy() *MaskFieldTablesObservation { + if in == nil { + return nil + } + out := new(MaskFieldTablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaskFieldTablesParameters) DeepCopyInto(out *MaskFieldTablesParameters) { + *out = *in + if in.ExcludeTables != nil { + in, out := &in.ExcludeTables, &out.ExcludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeTables != nil { + in, out := &in.IncludeTables, &out.IncludeTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFieldTablesParameters. +func (in *MaskFieldTablesParameters) DeepCopy() *MaskFieldTablesParameters { + if in == nil { + return nil + } + out := new(MaskFieldTablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFunctionHashInitParameters) DeepCopyInto(out *MaskFunctionHashInitParameters) { + *out = *in + if in.UserDefinedSalt != nil { + in, out := &in.UserDefinedSalt, &out.UserDefinedSalt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFunctionHashInitParameters. +func (in *MaskFunctionHashInitParameters) DeepCopy() *MaskFunctionHashInitParameters { + if in == nil { + return nil + } + out := new(MaskFunctionHashInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFunctionHashObservation) DeepCopyInto(out *MaskFunctionHashObservation) { + *out = *in + if in.UserDefinedSalt != nil { + in, out := &in.UserDefinedSalt, &out.UserDefinedSalt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFunctionHashObservation. +func (in *MaskFunctionHashObservation) DeepCopy() *MaskFunctionHashObservation { + if in == nil { + return nil + } + out := new(MaskFunctionHashObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaskFunctionHashParameters) DeepCopyInto(out *MaskFunctionHashParameters) { + *out = *in + if in.UserDefinedSalt != nil { + in, out := &in.UserDefinedSalt, &out.UserDefinedSalt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaskFunctionHashParameters. +func (in *MaskFunctionHashParameters) DeepCopy() *MaskFunctionHashParameters { + if in == nil { + return nil + } + out := new(MaskFunctionHashParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetrikaSourceInitParameters) DeepCopyInto(out *MetrikaSourceInitParameters) { + *out = *in + if in.CounterIds != nil { + in, out := &in.CounterIds, &out.CounterIds + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]StreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = make([]TokenInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetrikaSourceInitParameters. +func (in *MetrikaSourceInitParameters) DeepCopy() *MetrikaSourceInitParameters { + if in == nil { + return nil + } + out := new(MetrikaSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetrikaSourceObservation) DeepCopyInto(out *MetrikaSourceObservation) { + *out = *in + if in.CounterIds != nil { + in, out := &in.CounterIds, &out.CounterIds + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]StreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = make([]TokenParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetrikaSourceObservation. +func (in *MetrikaSourceObservation) DeepCopy() *MetrikaSourceObservation { + if in == nil { + return nil + } + out := new(MetrikaSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetrikaSourceParameters) DeepCopyInto(out *MetrikaSourceParameters) { + *out = *in + if in.CounterIds != nil { + in, out := &in.CounterIds, &out.CounterIds + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]StreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = make([]TokenParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetrikaSourceParameters. +func (in *MetrikaSourceParameters) DeepCopy() *MetrikaSourceParameters { + if in == nil { + return nil + } + out := new(MetrikaSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoSourceConnectionConnectionOptionsInitParameters) DeepCopyInto(out *MongoSourceConnectionConnectionOptionsInitParameters) { + *out = *in + if in.AuthSource != nil { + in, out := &in.AuthSource, &out.AuthSource + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.MdbClusterIDRef != nil { + in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MdbClusterIDSelector != nil { + in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionConnectionOptionsOnPremiseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]ConnectionConnectionOptionsPasswordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceConnectionConnectionOptionsInitParameters. +func (in *MongoSourceConnectionConnectionOptionsInitParameters) DeepCopy() *MongoSourceConnectionConnectionOptionsInitParameters { + if in == nil { + return nil + } + out := new(MongoSourceConnectionConnectionOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoSourceConnectionConnectionOptionsObservation) DeepCopyInto(out *MongoSourceConnectionConnectionOptionsObservation) { + *out = *in + if in.AuthSource != nil { + in, out := &in.AuthSource, &out.AuthSource + *out = new(string) + **out = **in + } + if in.MdbClusterID != nil { + in, out := &in.MdbClusterID, &out.MdbClusterID + *out = new(string) + **out = **in + } + if in.OnPremise != nil { + in, out := &in.OnPremise, &out.OnPremise + *out = make([]ConnectionConnectionOptionsOnPremiseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = make([]ConnectionConnectionOptionsPasswordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceConnectionConnectionOptionsObservation. +func (in *MongoSourceConnectionConnectionOptionsObservation) DeepCopy() *MongoSourceConnectionConnectionOptionsObservation { + if in == nil { + return nil + } + out := new(MongoSourceConnectionConnectionOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoSourceConnectionConnectionOptionsParameters) DeepCopyInto(out *MongoSourceConnectionConnectionOptionsParameters) {
+	*out = *in
+	if in.AuthSource != nil {
+		in, out := &in.AuthSource, &out.AuthSource
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]ConnectionConnectionOptionsOnPremiseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]ConnectionConnectionOptionsPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceConnectionConnectionOptionsParameters.
+func (in *MongoSourceConnectionConnectionOptionsParameters) DeepCopy() *MongoSourceConnectionConnectionOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceConnectionConnectionOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoSourceConnectionInitParameters) DeepCopyInto(out *MongoSourceConnectionInitParameters) {
+	*out = *in
+	if in.ConnectionOptions != nil {
+		in, out := &in.ConnectionOptions, &out.ConnectionOptions
+		*out = make([]MongoSourceConnectionConnectionOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceConnectionInitParameters.
+func (in *MongoSourceConnectionInitParameters) DeepCopy() *MongoSourceConnectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceConnectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoSourceConnectionObservation) DeepCopyInto(out *MongoSourceConnectionObservation) {
+	*out = *in
+	if in.ConnectionOptions != nil {
+		in, out := &in.ConnectionOptions, &out.ConnectionOptions
+		*out = make([]MongoSourceConnectionConnectionOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceConnectionObservation.
+func (in *MongoSourceConnectionObservation) DeepCopy() *MongoSourceConnectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceConnectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoSourceConnectionParameters) DeepCopyInto(out *MongoSourceConnectionParameters) {
+	*out = *in
+	if in.ConnectionOptions != nil {
+		in, out := &in.ConnectionOptions, &out.ConnectionOptions
+		*out = make([]MongoSourceConnectionConnectionOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceConnectionParameters.
+func (in *MongoSourceConnectionParameters) DeepCopy() *MongoSourceConnectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceConnectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoSourceInitParameters) DeepCopyInto(out *MongoSourceInitParameters) {
+	*out = *in
+	if in.Collections != nil {
+		in, out := &in.Collections, &out.Collections
+		*out = make([]CollectionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MongoSourceConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExcludedCollections != nil {
+		in, out := &in.ExcludedCollections, &out.ExcludedCollections
+		*out = make([]ExcludedCollectionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryPreferredMode != nil {
+		in, out := &in.SecondaryPreferredMode, &out.SecondaryPreferredMode
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceInitParameters.
+func (in *MongoSourceInitParameters) DeepCopy() *MongoSourceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoSourceObservation) DeepCopyInto(out *MongoSourceObservation) {
+	*out = *in
+	if in.Collections != nil {
+		in, out := &in.Collections, &out.Collections
+		*out = make([]CollectionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MongoSourceConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExcludedCollections != nil {
+		in, out := &in.ExcludedCollections, &out.ExcludedCollections
+		*out = make([]ExcludedCollectionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryPreferredMode != nil {
+		in, out := &in.SecondaryPreferredMode, &out.SecondaryPreferredMode
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceObservation.
+func (in *MongoSourceObservation) DeepCopy() *MongoSourceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoSourceParameters) DeepCopyInto(out *MongoSourceParameters) {
+	*out = *in
+	if in.Collections != nil {
+		in, out := &in.Collections, &out.Collections
+		*out = make([]CollectionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MongoSourceConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ExcludedCollections != nil {
+		in, out := &in.ExcludedCollections, &out.ExcludedCollections
+		*out = make([]ExcludedCollectionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecondaryPreferredMode != nil {
+		in, out := &in.SecondaryPreferredMode, &out.SecondaryPreferredMode
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoSourceParameters.
+func (in *MongoSourceParameters) DeepCopy() *MongoSourceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoSourceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsInitParameters) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsInitParameters) {
+	*out = *in
+	if in.AuthSource != nil {
+		in, out := &in.AuthSource, &out.AuthSource
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MongoTargetConnectionConnectionOptionsOnPremiseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MongoTargetConnectionConnectionOptionsPasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsInitParameters.
+func (in *MongoTargetConnectionConnectionOptionsInitParameters) DeepCopy() *MongoTargetConnectionConnectionOptionsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsObservation) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsObservation) {
+	*out = *in
+	if in.AuthSource != nil {
+		in, out := &in.AuthSource, &out.AuthSource
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MongoTargetConnectionConnectionOptionsOnPremiseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MongoTargetConnectionConnectionOptionsPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsObservation.
+func (in *MongoTargetConnectionConnectionOptionsObservation) DeepCopy() *MongoTargetConnectionConnectionOptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsOnPremiseInitParameters) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsOnPremiseInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicaSet != nil {
+		in, out := &in.ReplicaSet, &out.ReplicaSet
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]ConnectionConnectionOptionsOnPremiseTLSModeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsOnPremiseInitParameters.
+func (in *MongoTargetConnectionConnectionOptionsOnPremiseInitParameters) DeepCopy() *MongoTargetConnectionConnectionOptionsOnPremiseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsOnPremiseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsOnPremiseObservation) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsOnPremiseObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicaSet != nil {
+		in, out := &in.ReplicaSet, &out.ReplicaSet
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]ConnectionConnectionOptionsOnPremiseTLSModeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsOnPremiseObservation.
+func (in *MongoTargetConnectionConnectionOptionsOnPremiseObservation) DeepCopy() *MongoTargetConnectionConnectionOptionsOnPremiseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsOnPremiseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsOnPremiseParameters) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsOnPremiseParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicaSet != nil {
+		in, out := &in.ReplicaSet, &out.ReplicaSet
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]ConnectionConnectionOptionsOnPremiseTLSModeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsOnPremiseParameters.
+func (in *MongoTargetConnectionConnectionOptionsOnPremiseParameters) DeepCopy() *MongoTargetConnectionConnectionOptionsOnPremiseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsOnPremiseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsParameters) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsParameters) {
+	*out = *in
+	if in.AuthSource != nil {
+		in, out := &in.AuthSource, &out.AuthSource
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MongoTargetConnectionConnectionOptionsOnPremiseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MongoTargetConnectionConnectionOptionsPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsParameters.
+func (in *MongoTargetConnectionConnectionOptionsParameters) DeepCopy() *MongoTargetConnectionConnectionOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsPasswordInitParameters) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsPasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsPasswordInitParameters.
+func (in *MongoTargetConnectionConnectionOptionsPasswordInitParameters) DeepCopy() *MongoTargetConnectionConnectionOptionsPasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsPasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsPasswordObservation) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsPasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsPasswordObservation.
+func (in *MongoTargetConnectionConnectionOptionsPasswordObservation) DeepCopy() *MongoTargetConnectionConnectionOptionsPasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsPasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionConnectionOptionsPasswordParameters) DeepCopyInto(out *MongoTargetConnectionConnectionOptionsPasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionConnectionOptionsPasswordParameters.
+func (in *MongoTargetConnectionConnectionOptionsPasswordParameters) DeepCopy() *MongoTargetConnectionConnectionOptionsPasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionConnectionOptionsPasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionInitParameters) DeepCopyInto(out *MongoTargetConnectionInitParameters) {
+	*out = *in
+	if in.ConnectionOptions != nil {
+		in, out := &in.ConnectionOptions, &out.ConnectionOptions
+		*out = make([]MongoTargetConnectionConnectionOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionInitParameters.
+func (in *MongoTargetConnectionInitParameters) DeepCopy() *MongoTargetConnectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionObservation) DeepCopyInto(out *MongoTargetConnectionObservation) {
+	*out = *in
+	if in.ConnectionOptions != nil {
+		in, out := &in.ConnectionOptions, &out.ConnectionOptions
+		*out = make([]MongoTargetConnectionConnectionOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionObservation.
+func (in *MongoTargetConnectionObservation) DeepCopy() *MongoTargetConnectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetConnectionParameters) DeepCopyInto(out *MongoTargetConnectionParameters) {
+	*out = *in
+	if in.ConnectionOptions != nil {
+		in, out := &in.ConnectionOptions, &out.ConnectionOptions
+		*out = make([]MongoTargetConnectionConnectionOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetConnectionParameters.
+func (in *MongoTargetConnectionParameters) DeepCopy() *MongoTargetConnectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetConnectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetInitParameters) DeepCopyInto(out *MongoTargetInitParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MongoTargetConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetInitParameters.
+func (in *MongoTargetInitParameters) DeepCopy() *MongoTargetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetObservation) DeepCopyInto(out *MongoTargetObservation) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MongoTargetConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetObservation.
+func (in *MongoTargetObservation) DeepCopy() *MongoTargetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongoTargetParameters) DeepCopyInto(out *MongoTargetParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MongoTargetConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoTargetParameters.
+func (in *MongoTargetParameters) DeepCopy() *MongoTargetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongoTargetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionInitParameters) DeepCopyInto(out *MySQLSourceConnectionInitParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MySQLSourceConnectionOnPremiseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionInitParameters.
+func (in *MySQLSourceConnectionInitParameters) DeepCopy() *MySQLSourceConnectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionObservation) DeepCopyInto(out *MySQLSourceConnectionObservation) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MySQLSourceConnectionOnPremiseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionObservation.
+func (in *MySQLSourceConnectionObservation) DeepCopy() *MySQLSourceConnectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseInitParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseInitParameters.
+func (in *MySQLSourceConnectionOnPremiseInitParameters) DeepCopy() *MySQLSourceConnectionOnPremiseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseObservation) DeepCopyInto(out *MySQLSourceConnectionOnPremiseObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseObservation.
+func (in *MySQLSourceConnectionOnPremiseObservation) DeepCopy() *MySQLSourceConnectionOnPremiseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseParameters.
+func (in *MySQLSourceConnectionOnPremiseParameters) DeepCopy() *MySQLSourceConnectionOnPremiseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters.
+func (in *MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeDisabledObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeDisabledObservation.
+func (in *MySQLSourceConnectionOnPremiseTLSModeDisabledObservation) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeDisabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeDisabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeDisabledParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeDisabledParameters.
+func (in *MySQLSourceConnectionOnPremiseTLSModeDisabledParameters) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeDisabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeDisabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters.
+func (in *MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeEnabledObservation) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeEnabledObservation.
+func (in *MySQLSourceConnectionOnPremiseTLSModeEnabledObservation) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeEnabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeEnabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeEnabledParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeEnabledParameters.
+func (in *MySQLSourceConnectionOnPremiseTLSModeEnabledParameters) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeEnabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeEnabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeInitParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeDisabledInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeEnabledInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeInitParameters.
+func (in *MySQLSourceConnectionOnPremiseTLSModeInitParameters) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeObservation) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeEnabledObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeObservation.
+func (in *MySQLSourceConnectionOnPremiseTLSModeObservation) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionOnPremiseTLSModeParameters) DeepCopyInto(out *MySQLSourceConnectionOnPremiseTLSModeParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]MySQLSourceConnectionOnPremiseTLSModeEnabledParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionOnPremiseTLSModeParameters.
+func (in *MySQLSourceConnectionOnPremiseTLSModeParameters) DeepCopy() *MySQLSourceConnectionOnPremiseTLSModeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionOnPremiseTLSModeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceConnectionParameters) DeepCopyInto(out *MySQLSourceConnectionParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MySQLSourceConnectionOnPremiseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceConnectionParameters.
+func (in *MySQLSourceConnectionParameters) DeepCopy() *MySQLSourceConnectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceConnectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceInitParameters) DeepCopyInto(out *MySQLSourceInitParameters) {
+	*out = *in
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MySQLSourceConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExcludeTablesRegex != nil {
+		in, out := &in.ExcludeTablesRegex, &out.ExcludeTablesRegex
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTablesRegex != nil {
+		in, out := &in.IncludeTablesRegex, &out.IncludeTablesRegex
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ObjectTransferSettings != nil {
+		in, out := &in.ObjectTransferSettings, &out.ObjectTransferSettings
+		*out = make([]ObjectTransferSettingsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MySQLSourcePasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceDatabase != nil {
+		in, out := &in.ServiceDatabase, &out.ServiceDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceInitParameters.
+func (in *MySQLSourceInitParameters) DeepCopy() *MySQLSourceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceObservation) DeepCopyInto(out *MySQLSourceObservation) {
+	*out = *in
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MySQLSourceConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExcludeTablesRegex != nil {
+		in, out := &in.ExcludeTablesRegex, &out.ExcludeTablesRegex
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTablesRegex != nil {
+		in, out := &in.IncludeTablesRegex, &out.IncludeTablesRegex
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ObjectTransferSettings != nil {
+		in, out := &in.ObjectTransferSettings, &out.ObjectTransferSettings
+		*out = make([]ObjectTransferSettingsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MySQLSourcePasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ServiceDatabase != nil {
+		in, out := &in.ServiceDatabase, &out.ServiceDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceObservation.
+func (in *MySQLSourceObservation) DeepCopy() *MySQLSourceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourceParameters) DeepCopyInto(out *MySQLSourceParameters) {
+	*out = *in
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MySQLSourceConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExcludeTablesRegex != nil {
+		in, out := &in.ExcludeTablesRegex, &out.ExcludeTablesRegex
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTablesRegex != nil {
+		in, out := &in.IncludeTablesRegex, &out.IncludeTablesRegex
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ObjectTransferSettings != nil {
+		in, out := &in.ObjectTransferSettings, &out.ObjectTransferSettings
+		*out = make([]ObjectTransferSettingsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MySQLSourcePasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceDatabase != nil {
+		in, out := &in.ServiceDatabase, &out.ServiceDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourceParameters.
+func (in *MySQLSourceParameters) DeepCopy() *MySQLSourceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourcePasswordInitParameters) DeepCopyInto(out *MySQLSourcePasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourcePasswordInitParameters.
+func (in *MySQLSourcePasswordInitParameters) DeepCopy() *MySQLSourcePasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourcePasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourcePasswordObservation) DeepCopyInto(out *MySQLSourcePasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourcePasswordObservation.
+func (in *MySQLSourcePasswordObservation) DeepCopy() *MySQLSourcePasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourcePasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSourcePasswordParameters) DeepCopyInto(out *MySQLSourcePasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSourcePasswordParameters.
+func (in *MySQLSourcePasswordParameters) DeepCopy() *MySQLSourcePasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSourcePasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionInitParameters) DeepCopyInto(out *MySQLTargetConnectionInitParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MySQLTargetConnectionOnPremiseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionInitParameters.
+func (in *MySQLTargetConnectionInitParameters) DeepCopy() *MySQLTargetConnectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionObservation) DeepCopyInto(out *MySQLTargetConnectionObservation) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MySQLTargetConnectionOnPremiseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionObservation.
+func (in *MySQLTargetConnectionObservation) DeepCopy() *MySQLTargetConnectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseInitParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseInitParameters.
+func (in *MySQLTargetConnectionOnPremiseInitParameters) DeepCopy() *MySQLTargetConnectionOnPremiseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseObservation) DeepCopyInto(out *MySQLTargetConnectionOnPremiseObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseObservation.
+func (in *MySQLTargetConnectionOnPremiseObservation) DeepCopy() *MySQLTargetConnectionOnPremiseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseParameters.
+func (in *MySQLTargetConnectionOnPremiseParameters) DeepCopy() *MySQLTargetConnectionOnPremiseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters.
+func (in *MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeDisabledObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeDisabledObservation.
+func (in *MySQLTargetConnectionOnPremiseTLSModeDisabledObservation) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeDisabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeDisabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeDisabledParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeDisabledParameters.
+func (in *MySQLTargetConnectionOnPremiseTLSModeDisabledParameters) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeDisabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeDisabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters.
+func (in *MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeEnabledObservation) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeEnabledObservation.
+func (in *MySQLTargetConnectionOnPremiseTLSModeEnabledObservation) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeEnabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeEnabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeEnabledParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeEnabledParameters.
+func (in *MySQLTargetConnectionOnPremiseTLSModeEnabledParameters) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeEnabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeEnabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeInitParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeDisabledInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeEnabledInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeInitParameters.
+func (in *MySQLTargetConnectionOnPremiseTLSModeInitParameters) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeObservation) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeEnabledObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeObservation.
+func (in *MySQLTargetConnectionOnPremiseTLSModeObservation) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionOnPremiseTLSModeParameters) DeepCopyInto(out *MySQLTargetConnectionOnPremiseTLSModeParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]MySQLTargetConnectionOnPremiseTLSModeEnabledParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionOnPremiseTLSModeParameters.
+func (in *MySQLTargetConnectionOnPremiseTLSModeParameters) DeepCopy() *MySQLTargetConnectionOnPremiseTLSModeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionOnPremiseTLSModeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetConnectionParameters) DeepCopyInto(out *MySQLTargetConnectionParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]MySQLTargetConnectionOnPremiseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetConnectionParameters.
+func (in *MySQLTargetConnectionParameters) DeepCopy() *MySQLTargetConnectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetConnectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetInitParameters) DeepCopyInto(out *MySQLTargetInitParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MySQLTargetConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MySQLTargetPasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SQLMode != nil {
+		in, out := &in.SQLMode, &out.SQLMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceDatabase != nil {
+		in, out := &in.ServiceDatabase, &out.ServiceDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.SkipConstraintChecks != nil {
+		in, out := &in.SkipConstraintChecks, &out.SkipConstraintChecks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetInitParameters.
+func (in *MySQLTargetInitParameters) DeepCopy() *MySQLTargetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetObservation) DeepCopyInto(out *MySQLTargetObservation) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MySQLTargetConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MySQLTargetPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SQLMode != nil {
+		in, out := &in.SQLMode, &out.SQLMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ServiceDatabase != nil {
+		in, out := &in.ServiceDatabase, &out.ServiceDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.SkipConstraintChecks != nil {
+		in, out := &in.SkipConstraintChecks, &out.SkipConstraintChecks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetObservation.
+func (in *MySQLTargetObservation) DeepCopy() *MySQLTargetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetParameters) DeepCopyInto(out *MySQLTargetParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]MySQLTargetConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]MySQLTargetPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SQLMode != nil {
+		in, out := &in.SQLMode, &out.SQLMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceDatabase != nil {
+		in, out := &in.ServiceDatabase, &out.ServiceDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.SkipConstraintChecks != nil {
+		in, out := &in.SkipConstraintChecks, &out.SkipConstraintChecks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetParameters.
+func (in *MySQLTargetParameters) DeepCopy() *MySQLTargetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetPasswordInitParameters) DeepCopyInto(out *MySQLTargetPasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetPasswordInitParameters.
+func (in *MySQLTargetPasswordInitParameters) DeepCopy() *MySQLTargetPasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetPasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetPasswordObservation) DeepCopyInto(out *MySQLTargetPasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetPasswordObservation.
+func (in *MySQLTargetPasswordObservation) DeepCopy() *MySQLTargetPasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetPasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLTargetPasswordParameters) DeepCopyInto(out *MySQLTargetPasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLTargetPasswordParameters.
+func (in *MySQLTargetPasswordParameters) DeepCopy() *MySQLTargetPasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLTargetPasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NewNameInitParameters) DeepCopyInto(out *NewNameInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameSpace != nil {
+		in, out := &in.NameSpace, &out.NameSpace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewNameInitParameters.
+func (in *NewNameInitParameters) DeepCopy() *NewNameInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NewNameInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NewNameObservation) DeepCopyInto(out *NewNameObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameSpace != nil {
+		in, out := &in.NameSpace, &out.NameSpace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewNameObservation.
+func (in *NewNameObservation) DeepCopy() *NewNameObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(NewNameObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NewNameParameters) DeepCopyInto(out *NewNameParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameSpace != nil {
+		in, out := &in.NameSpace, &out.NameSpace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewNameParameters.
+func (in *NewNameParameters) DeepCopy() *NewNameParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NewNameParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoAuthInitParameters) DeepCopyInto(out *NoAuthInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoAuthInitParameters.
+func (in *NoAuthInitParameters) DeepCopy() *NoAuthInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NoAuthInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoAuthObservation) DeepCopyInto(out *NoAuthObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoAuthObservation.
+func (in *NoAuthObservation) DeepCopy() *NoAuthObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(NoAuthObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoAuthParameters) DeepCopyInto(out *NoAuthParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoAuthParameters.
+func (in *NoAuthParameters) DeepCopy() *NoAuthParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NoAuthParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectTransferSettingsInitParameters) DeepCopyInto(out *ObjectTransferSettingsInitParameters) {
+	*out = *in
+	if in.Routine != nil {
+		in, out := &in.Routine, &out.Routine
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = new(string)
+		**out = **in
+	}
+	if in.Trigger != nil {
+		in, out := &in.Trigger, &out.Trigger
+		*out = new(string)
+		**out = **in
+	}
+	if in.View != nil {
+		in, out := &in.View, &out.View
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferSettingsInitParameters.
+func (in *ObjectTransferSettingsInitParameters) DeepCopy() *ObjectTransferSettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectTransferSettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectTransferSettingsObservation) DeepCopyInto(out *ObjectTransferSettingsObservation) {
+	*out = *in
+	if in.Routine != nil {
+		in, out := &in.Routine, &out.Routine
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = new(string)
+		**out = **in
+	}
+	if in.Trigger != nil {
+		in, out := &in.Trigger, &out.Trigger
+		*out = new(string)
+		**out = **in
+	}
+	if in.View != nil {
+		in, out := &in.View, &out.View
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferSettingsObservation.
+func (in *ObjectTransferSettingsObservation) DeepCopy() *ObjectTransferSettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectTransferSettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectTransferSettingsParameters) DeepCopyInto(out *ObjectTransferSettingsParameters) {
+	*out = *in
+	if in.Routine != nil {
+		in, out := &in.Routine, &out.Routine
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = new(string)
+		**out = **in
+	}
+	if in.Trigger != nil {
+		in, out := &in.Trigger, &out.Trigger
+		*out = new(string)
+		**out = **in
+	}
+	if in.View != nil {
+		in, out := &in.View, &out.View
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferSettingsParameters.
+func (in *ObjectTransferSettingsParameters) DeepCopy() *ObjectTransferSettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectTransferSettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseInitParameters) DeepCopyInto(out *OnPremiseInitParameters) {
+	*out = *in
+	if in.HTTPPort != nil {
+		in, out := &in.HTTPPort, &out.HTTPPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NativePort != nil {
+		in, out := &in.NativePort, &out.NativePort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Shards != nil {
+		in, out := &in.Shards, &out.Shards
+		*out = make([]ShardsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]TLSModeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseInitParameters.
+func (in *OnPremiseInitParameters) DeepCopy() *OnPremiseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseObservation) DeepCopyInto(out *OnPremiseObservation) {
+	*out = *in
+	if in.HTTPPort != nil {
+		in, out := &in.HTTPPort, &out.HTTPPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NativePort != nil {
+		in, out := &in.NativePort, &out.NativePort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Shards != nil {
+		in, out := &in.Shards, &out.Shards
+		*out = make([]ShardsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]TLSModeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseObservation.
+func (in *OnPremiseObservation) DeepCopy() *OnPremiseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseParameters) DeepCopyInto(out *OnPremiseParameters) {
+	*out = *in
+	if in.HTTPPort != nil {
+		in, out := &in.HTTPPort, &out.HTTPPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NativePort != nil {
+		in, out := &in.NativePort, &out.NativePort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Shards != nil {
+		in, out := &in.Shards, &out.Shards
+		*out = make([]ShardsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]TLSModeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseParameters.
+func (in *OnPremiseParameters) DeepCopy() *OnPremiseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseShardsInitParameters) DeepCopyInto(out *OnPremiseShardsInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseShardsInitParameters.
+func (in *OnPremiseShardsInitParameters) DeepCopy() *OnPremiseShardsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseShardsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseShardsObservation) DeepCopyInto(out *OnPremiseShardsObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseShardsObservation.
+func (in *OnPremiseShardsObservation) DeepCopy() *OnPremiseShardsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseShardsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseShardsParameters) DeepCopyInto(out *OnPremiseShardsParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseShardsParameters.
+func (in *OnPremiseShardsParameters) DeepCopy() *OnPremiseShardsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseShardsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *OnPremiseTLSModeDisabledInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeDisabledInitParameters.
+func (in *OnPremiseTLSModeDisabledInitParameters) DeepCopy() *OnPremiseTLSModeDisabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeDisabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeDisabledObservation) DeepCopyInto(out *OnPremiseTLSModeDisabledObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeDisabledObservation.
+func (in *OnPremiseTLSModeDisabledObservation) DeepCopy() *OnPremiseTLSModeDisabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeDisabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeDisabledParameters) DeepCopyInto(out *OnPremiseTLSModeDisabledParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeDisabledParameters.
+func (in *OnPremiseTLSModeDisabledParameters) DeepCopy() *OnPremiseTLSModeDisabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeDisabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *OnPremiseTLSModeEnabledInitParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeEnabledInitParameters.
+func (in *OnPremiseTLSModeEnabledInitParameters) DeepCopy() *OnPremiseTLSModeEnabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeEnabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeEnabledObservation) DeepCopyInto(out *OnPremiseTLSModeEnabledObservation) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeEnabledObservation.
+func (in *OnPremiseTLSModeEnabledObservation) DeepCopy() *OnPremiseTLSModeEnabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeEnabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeEnabledParameters) DeepCopyInto(out *OnPremiseTLSModeEnabledParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeEnabledParameters.
+func (in *OnPremiseTLSModeEnabledParameters) DeepCopy() *OnPremiseTLSModeEnabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeEnabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeInitParameters) DeepCopyInto(out *OnPremiseTLSModeInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]TLSModeDisabledInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]TLSModeEnabledInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeInitParameters.
+func (in *OnPremiseTLSModeInitParameters) DeepCopy() *OnPremiseTLSModeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeObservation) DeepCopyInto(out *OnPremiseTLSModeObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]TLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]TLSModeEnabledObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeObservation.
+func (in *OnPremiseTLSModeObservation) DeepCopy() *OnPremiseTLSModeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremiseTLSModeParameters) DeepCopyInto(out *OnPremiseTLSModeParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]TLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]TLSModeEnabledParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseTLSModeParameters.
+func (in *OnPremiseTLSModeParameters) DeepCopy() *OnPremiseTLSModeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OnPremiseTLSModeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OriginalNameInitParameters) DeepCopyInto(out *OriginalNameInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameSpace != nil {
+		in, out := &in.NameSpace, &out.NameSpace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalNameInitParameters.
+func (in *OriginalNameInitParameters) DeepCopy() *OriginalNameInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OriginalNameInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OriginalNameObservation) DeepCopyInto(out *OriginalNameObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameSpace != nil {
+		in, out := &in.NameSpace, &out.NameSpace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalNameObservation.
+func (in *OriginalNameObservation) DeepCopy() *OriginalNameObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OriginalNameObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OriginalNameParameters) DeepCopyInto(out *OriginalNameParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameSpace != nil {
+		in, out := &in.NameSpace, &out.NameSpace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalNameParameters.
+func (in *OriginalNameParameters) DeepCopy() *OriginalNameParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OriginalNameParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserAuditTrailsV1ParserInitParameters) DeepCopyInto(out *ParserAuditTrailsV1ParserInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserAuditTrailsV1ParserInitParameters.
+func (in *ParserAuditTrailsV1ParserInitParameters) DeepCopy() *ParserAuditTrailsV1ParserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserAuditTrailsV1ParserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserAuditTrailsV1ParserObservation) DeepCopyInto(out *ParserAuditTrailsV1ParserObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserAuditTrailsV1ParserObservation.
+func (in *ParserAuditTrailsV1ParserObservation) DeepCopy() *ParserAuditTrailsV1ParserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserAuditTrailsV1ParserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserAuditTrailsV1ParserParameters) DeepCopyInto(out *ParserAuditTrailsV1ParserParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserAuditTrailsV1ParserParameters.
+func (in *ParserAuditTrailsV1ParserParameters) DeepCopy() *ParserAuditTrailsV1ParserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserAuditTrailsV1ParserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserCloudLoggingParserInitParameters) DeepCopyInto(out *ParserCloudLoggingParserInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserCloudLoggingParserInitParameters.
+func (in *ParserCloudLoggingParserInitParameters) DeepCopy() *ParserCloudLoggingParserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserCloudLoggingParserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserCloudLoggingParserObservation) DeepCopyInto(out *ParserCloudLoggingParserObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserCloudLoggingParserObservation.
+func (in *ParserCloudLoggingParserObservation) DeepCopy() *ParserCloudLoggingParserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserCloudLoggingParserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserCloudLoggingParserParameters) DeepCopyInto(out *ParserCloudLoggingParserParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserCloudLoggingParserParameters.
+func (in *ParserCloudLoggingParserParameters) DeepCopy() *ParserCloudLoggingParserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserCloudLoggingParserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserInitParameters) DeepCopyInto(out *ParserInitParameters) {
+	*out = *in
+	if in.AuditTrailsV1Parser != nil {
+		in, out := &in.AuditTrailsV1Parser, &out.AuditTrailsV1Parser
+		*out = make([]AuditTrailsV1ParserInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.CloudLoggingParser != nil {
+		in, out := &in.CloudLoggingParser, &out.CloudLoggingParser
+		*out = make([]CloudLoggingParserInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.JSONParser != nil {
+		in, out := &in.JSONParser, &out.JSONParser
+		*out = make([]JSONParserInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TskvParser != nil {
+		in, out := &in.TskvParser, &out.TskvParser
+		*out = make([]TskvParserInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserInitParameters.
+func (in *ParserInitParameters) DeepCopy() *ParserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserJSONParserInitParameters) DeepCopyInto(out *ParserJSONParserInitParameters) {
+	*out = *in
+	if in.AddRestColumn != nil {
+		in, out := &in.AddRestColumn, &out.AddRestColumn
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataSchema != nil {
+		in, out := &in.DataSchema, &out.DataSchema
+		*out = make([]JSONParserDataSchemaInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NullKeysAllowed != nil {
+		in, out := &in.NullKeysAllowed, &out.NullKeysAllowed
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnescapeStringValues != nil {
+		in, out := &in.UnescapeStringValues, &out.UnescapeStringValues
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserJSONParserInitParameters.
+func (in *ParserJSONParserInitParameters) DeepCopy() *ParserJSONParserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserJSONParserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserJSONParserObservation) DeepCopyInto(out *ParserJSONParserObservation) {
+	*out = *in
+	if in.AddRestColumn != nil {
+		in, out := &in.AddRestColumn, &out.AddRestColumn
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataSchema != nil {
+		in, out := &in.DataSchema, &out.DataSchema
+		*out = make([]JSONParserDataSchemaObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NullKeysAllowed != nil {
+		in, out := &in.NullKeysAllowed, &out.NullKeysAllowed
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnescapeStringValues != nil {
+		in, out := &in.UnescapeStringValues, &out.UnescapeStringValues
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserJSONParserObservation.
+func (in *ParserJSONParserObservation) DeepCopy() *ParserJSONParserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserJSONParserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserJSONParserParameters) DeepCopyInto(out *ParserJSONParserParameters) {
+	*out = *in
+	if in.AddRestColumn != nil {
+		in, out := &in.AddRestColumn, &out.AddRestColumn
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataSchema != nil {
+		in, out := &in.DataSchema, &out.DataSchema
+		*out = make([]JSONParserDataSchemaParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NullKeysAllowed != nil {
+		in, out := &in.NullKeysAllowed, &out.NullKeysAllowed
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnescapeStringValues != nil {
+		in, out := &in.UnescapeStringValues, &out.UnescapeStringValues
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserJSONParserParameters.
+func (in *ParserJSONParserParameters) DeepCopy() *ParserJSONParserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserJSONParserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserObservation) DeepCopyInto(out *ParserObservation) {
+	*out = *in
+	if in.AuditTrailsV1Parser != nil {
+		in, out := &in.AuditTrailsV1Parser, &out.AuditTrailsV1Parser
+		*out = make([]AuditTrailsV1ParserParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.CloudLoggingParser != nil {
+		in, out := &in.CloudLoggingParser, &out.CloudLoggingParser
+		*out = make([]CloudLoggingParserParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.JSONParser != nil {
+		in, out := &in.JSONParser, &out.JSONParser
+		*out = make([]JSONParserObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TskvParser != nil {
+		in, out := &in.TskvParser, &out.TskvParser
+		*out = make([]TskvParserObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserObservation.
+func (in *ParserObservation) DeepCopy() *ParserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserParameters) DeepCopyInto(out *ParserParameters) {
+	*out = *in
+	if in.AuditTrailsV1Parser != nil {
+		in, out := &in.AuditTrailsV1Parser, &out.AuditTrailsV1Parser
+		*out = make([]AuditTrailsV1ParserParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.CloudLoggingParser != nil {
+		in, out := &in.CloudLoggingParser, &out.CloudLoggingParser
+		*out = make([]CloudLoggingParserParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.JSONParser != nil {
+		in, out := &in.JSONParser, &out.JSONParser
+		*out = make([]JSONParserParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TskvParser != nil {
+		in, out := &in.TskvParser, &out.TskvParser
+		*out = make([]TskvParserParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserParameters.
+func (in *ParserParameters) DeepCopy() *ParserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserTskvParserDataSchemaInitParameters) DeepCopyInto(out *ParserTskvParserDataSchemaInitParameters) {
+	*out = *in
+	if in.Fields != nil {
+		in, out := &in.Fields, &out.Fields
+		*out = make([]TskvParserDataSchemaFieldsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.JSONFields != nil {
+		in, out := &in.JSONFields, &out.JSONFields
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserTskvParserDataSchemaInitParameters.
+func (in *ParserTskvParserDataSchemaInitParameters) DeepCopy() *ParserTskvParserDataSchemaInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserTskvParserDataSchemaInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserTskvParserDataSchemaObservation) DeepCopyInto(out *ParserTskvParserDataSchemaObservation) {
+	*out = *in
+	if in.Fields != nil {
+		in, out := &in.Fields, &out.Fields
+		*out = make([]TskvParserDataSchemaFieldsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.JSONFields != nil {
+		in, out := &in.JSONFields, &out.JSONFields
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserTskvParserDataSchemaObservation.
+func (in *ParserTskvParserDataSchemaObservation) DeepCopy() *ParserTskvParserDataSchemaObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserTskvParserDataSchemaObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserTskvParserDataSchemaParameters) DeepCopyInto(out *ParserTskvParserDataSchemaParameters) {
+	*out = *in
+	if in.Fields != nil {
+		in, out := &in.Fields, &out.Fields
+		*out = make([]TskvParserDataSchemaFieldsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.JSONFields != nil {
+		in, out := &in.JSONFields, &out.JSONFields
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserTskvParserDataSchemaParameters.
+func (in *ParserTskvParserDataSchemaParameters) DeepCopy() *ParserTskvParserDataSchemaParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserTskvParserDataSchemaParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserTskvParserInitParameters) DeepCopyInto(out *ParserTskvParserInitParameters) {
+	*out = *in
+	if in.AddRestColumn != nil {
+		in, out := &in.AddRestColumn, &out.AddRestColumn
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataSchema != nil {
+		in, out := &in.DataSchema, &out.DataSchema
+		*out = make([]ParserTskvParserDataSchemaInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NullKeysAllowed != nil {
+		in, out := &in.NullKeysAllowed, &out.NullKeysAllowed
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnescapeStringValues != nil {
+		in, out := &in.UnescapeStringValues, &out.UnescapeStringValues
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserTskvParserInitParameters.
+func (in *ParserTskvParserInitParameters) DeepCopy() *ParserTskvParserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserTskvParserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserTskvParserObservation) DeepCopyInto(out *ParserTskvParserObservation) {
+	*out = *in
+	if in.AddRestColumn != nil {
+		in, out := &in.AddRestColumn, &out.AddRestColumn
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataSchema != nil {
+		in, out := &in.DataSchema, &out.DataSchema
+		*out = make([]ParserTskvParserDataSchemaObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NullKeysAllowed != nil {
+		in, out := &in.NullKeysAllowed, &out.NullKeysAllowed
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnescapeStringValues != nil {
+		in, out := &in.UnescapeStringValues, &out.UnescapeStringValues
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserTskvParserObservation.
+func (in *ParserTskvParserObservation) DeepCopy() *ParserTskvParserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserTskvParserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParserTskvParserParameters) DeepCopyInto(out *ParserTskvParserParameters) {
+	*out = *in
+	if in.AddRestColumn != nil {
+		in, out := &in.AddRestColumn, &out.AddRestColumn
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataSchema != nil {
+		in, out := &in.DataSchema, &out.DataSchema
+		*out = make([]ParserTskvParserDataSchemaParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NullKeysAllowed != nil {
+		in, out := &in.NullKeysAllowed, &out.NullKeysAllowed
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnescapeStringValues != nil {
+		in, out := &in.UnescapeStringValues, &out.UnescapeStringValues
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParserTskvParserParameters.
+func (in *ParserTskvParserParameters) DeepCopy() *ParserTskvParserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ParserTskvParserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PasswordInitParameters) DeepCopyInto(out *PasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordInitParameters.
+func (in *PasswordInitParameters) DeepCopy() *PasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PasswordObservation) DeepCopyInto(out *PasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordObservation.
+func (in *PasswordObservation) DeepCopy() *PasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PasswordParameters) DeepCopyInto(out *PasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordParameters.
+func (in *PasswordParameters) DeepCopy() *PasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionInitParameters) DeepCopyInto(out *PostgresSourceConnectionInitParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]PostgresSourceConnectionOnPremiseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionInitParameters.
+func (in *PostgresSourceConnectionInitParameters) DeepCopy() *PostgresSourceConnectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionObservation) DeepCopyInto(out *PostgresSourceConnectionObservation) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]PostgresSourceConnectionOnPremiseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionObservation.
+func (in *PostgresSourceConnectionObservation) DeepCopy() *PostgresSourceConnectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseInitParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseInitParameters.
+func (in *PostgresSourceConnectionOnPremiseInitParameters) DeepCopy() *PostgresSourceConnectionOnPremiseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseObservation) DeepCopyInto(out *PostgresSourceConnectionOnPremiseObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseObservation.
+func (in *PostgresSourceConnectionOnPremiseObservation) DeepCopy() *PostgresSourceConnectionOnPremiseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseParameters.
+func (in *PostgresSourceConnectionOnPremiseParameters) DeepCopy() *PostgresSourceConnectionOnPremiseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters.
+func (in *PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeDisabledObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeDisabledObservation.
+func (in *PostgresSourceConnectionOnPremiseTLSModeDisabledObservation) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeDisabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeDisabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeDisabledParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeDisabledParameters.
+func (in *PostgresSourceConnectionOnPremiseTLSModeDisabledParameters) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeDisabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeDisabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters.
+func (in *PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeEnabledObservation) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeEnabledObservation.
+func (in *PostgresSourceConnectionOnPremiseTLSModeEnabledObservation) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeEnabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeEnabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeEnabledParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeEnabledParameters.
+func (in *PostgresSourceConnectionOnPremiseTLSModeEnabledParameters) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeEnabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeEnabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeInitParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeDisabledInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeEnabledInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeInitParameters.
+func (in *PostgresSourceConnectionOnPremiseTLSModeInitParameters) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeObservation) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeEnabledObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeObservation.
+func (in *PostgresSourceConnectionOnPremiseTLSModeObservation) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionOnPremiseTLSModeParameters) DeepCopyInto(out *PostgresSourceConnectionOnPremiseTLSModeParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]PostgresSourceConnectionOnPremiseTLSModeEnabledParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionOnPremiseTLSModeParameters.
+func (in *PostgresSourceConnectionOnPremiseTLSModeParameters) DeepCopy() *PostgresSourceConnectionOnPremiseTLSModeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionOnPremiseTLSModeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceConnectionParameters) DeepCopyInto(out *PostgresSourceConnectionParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]PostgresSourceConnectionOnPremiseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceConnectionParameters.
+func (in *PostgresSourceConnectionParameters) DeepCopy() *PostgresSourceConnectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceConnectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceInitParameters) DeepCopyInto(out *PostgresSourceInitParameters) {
+	*out = *in
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]PostgresSourceConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ObjectTransferSettings != nil {
+		in, out := &in.ObjectTransferSettings, &out.ObjectTransferSettings
+		*out = make([]PostgresSourceObjectTransferSettingsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]PostgresSourcePasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceSchema != nil {
+		in, out := &in.ServiceSchema, &out.ServiceSchema
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlotGigabyteLagLimit != nil {
+		in, out := &in.SlotGigabyteLagLimit, &out.SlotGigabyteLagLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceInitParameters.
+func (in *PostgresSourceInitParameters) DeepCopy() *PostgresSourceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceObjectTransferSettingsInitParameters) DeepCopyInto(out *PostgresSourceObjectTransferSettingsInitParameters) {
+	*out = *in
+	if in.Cast != nil {
+		in, out := &in.Cast, &out.Cast
+		*out = new(string)
+		**out = **in
+	}
+	if in.Collation != nil {
+		in, out := &in.Collation, &out.Collation
+		*out = new(string)
+		**out = **in
+	}
+	if in.Constraint != nil {
+		in, out := &in.Constraint, &out.Constraint
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultValues != nil {
+		in, out := &in.DefaultValues, &out.DefaultValues
+		*out = new(string)
+		**out = **in
+	}
+	if in.FkConstraint != nil {
+		in, out := &in.FkConstraint, &out.FkConstraint
+		*out = new(string)
+		**out = **in
+	}
+	if in.Function != nil {
+		in, out := &in.Function, &out.Function
+		*out = new(string)
+		**out = **in
+	}
+	if in.Index != nil {
+		in, out := &in.Index, &out.Index
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaterializedView != nil {
+		in, out := &in.MaterializedView, &out.MaterializedView
+		*out = new(string)
+		**out = **in
+	}
+	if in.Policy != nil {
+		in, out := &in.Policy, &out.Policy
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrimaryKey != nil {
+		in, out := &in.PrimaryKey, &out.PrimaryKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = new(string)
+		**out = **in
+	}
+	if in.Sequence != nil {
+		in, out := &in.Sequence, &out.Sequence
+		*out = new(string)
+		**out = **in
+	}
+	if in.SequenceOwnedBy != nil {
+		in, out := &in.SequenceOwnedBy, &out.SequenceOwnedBy
+		*out = new(string)
+		**out = **in
+	}
+	if in.SequenceSet != nil {
+		in, out := &in.SequenceSet, &out.SequenceSet
+		*out = new(string)
+		**out = **in
+	}
+	if in.Table != nil {
+		in, out := &in.Table, &out.Table
+		*out = new(string)
+		**out = **in
+	}
+	if in.Trigger != nil {
+		in, out := &in.Trigger, &out.Trigger
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.View != nil {
+		in, out := &in.View, &out.View
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceObjectTransferSettingsInitParameters.
+func (in *PostgresSourceObjectTransferSettingsInitParameters) DeepCopy() *PostgresSourceObjectTransferSettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceObjectTransferSettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceObjectTransferSettingsObservation) DeepCopyInto(out *PostgresSourceObjectTransferSettingsObservation) {
+	*out = *in
+	if in.Cast != nil {
+		in, out := &in.Cast, &out.Cast
+		*out = new(string)
+		**out = **in
+	}
+	if in.Collation != nil {
+		in, out := &in.Collation, &out.Collation
+		*out = new(string)
+		**out = **in
+	}
+	if in.Constraint != nil {
+		in, out := &in.Constraint, &out.Constraint
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultValues != nil {
+		in, out := &in.DefaultValues, &out.DefaultValues
+		*out = new(string)
+		**out = **in
+	}
+	if in.FkConstraint != nil {
+		in, out := &in.FkConstraint, &out.FkConstraint
+		*out = new(string)
+		**out = **in
+	}
+	if in.Function != nil {
+		in, out := &in.Function, &out.Function
+		*out = new(string)
+		**out = **in
+	}
+	if in.Index != nil {
+		in, out := &in.Index, &out.Index
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaterializedView != nil {
+		in, out := &in.MaterializedView, &out.MaterializedView
+		*out = new(string)
+		**out = **in
+	}
+	if in.Policy != nil {
+		in, out := &in.Policy, &out.Policy
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrimaryKey != nil {
+		in, out := &in.PrimaryKey, &out.PrimaryKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = new(string)
+		**out = **in
+	}
+	if in.Sequence != nil {
+		in, out := &in.Sequence, &out.Sequence
+		*out = new(string)
+		**out = **in
+	}
+	if in.SequenceOwnedBy != nil {
+		in, out := &in.SequenceOwnedBy, &out.SequenceOwnedBy
+		*out = new(string)
+		**out = **in
+	}
+	if in.SequenceSet != nil {
+		in, out := &in.SequenceSet, &out.SequenceSet
+		*out = new(string)
+		**out = **in
+	}
+	if in.Table != nil {
+		in, out := &in.Table, &out.Table
+		*out = new(string)
+		**out = **in
+	}
+	if in.Trigger != nil {
+		in, out := &in.Trigger, &out.Trigger
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.View != nil {
+		in, out := &in.View, &out.View
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceObjectTransferSettingsObservation.
+func (in *PostgresSourceObjectTransferSettingsObservation) DeepCopy() *PostgresSourceObjectTransferSettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceObjectTransferSettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceObjectTransferSettingsParameters) DeepCopyInto(out *PostgresSourceObjectTransferSettingsParameters) {
+	*out = *in
+	if in.Cast != nil {
+		in, out := &in.Cast, &out.Cast
+		*out = new(string)
+		**out = **in
+	}
+	if in.Collation != nil {
+		in, out := &in.Collation, &out.Collation
+		*out = new(string)
+		**out = **in
+	}
+	if in.Constraint != nil {
+		in, out := &in.Constraint, &out.Constraint
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultValues != nil {
+		in, out := &in.DefaultValues, &out.DefaultValues
+		*out = new(string)
+		**out = **in
+	}
+	if in.FkConstraint != nil {
+		in, out := &in.FkConstraint, &out.FkConstraint
+		*out = new(string)
+		**out = **in
+	}
+	if in.Function != nil {
+		in, out := &in.Function, &out.Function
+		*out = new(string)
+		**out = **in
+	}
+	if in.Index != nil {
+		in, out := &in.Index, &out.Index
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaterializedView != nil {
+		in, out := &in.MaterializedView, &out.MaterializedView
+		*out = new(string)
+		**out = **in
+	}
+	if in.Policy != nil {
+		in, out := &in.Policy, &out.Policy
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrimaryKey != nil {
+		in, out := &in.PrimaryKey, &out.PrimaryKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = new(string)
+		**out = **in
+	}
+	if in.Sequence != nil {
+		in, out := &in.Sequence, &out.Sequence
+		*out = new(string)
+		**out = **in
+	}
+	if in.SequenceOwnedBy != nil {
+		in, out := &in.SequenceOwnedBy, &out.SequenceOwnedBy
+		*out = new(string)
+		**out = **in
+	}
+	if in.SequenceSet != nil {
+		in, out := &in.SequenceSet, &out.SequenceSet
+		*out = new(string)
+		**out = **in
+	}
+	if in.Table != nil {
+		in, out := &in.Table, &out.Table
+		*out = new(string)
+		**out = **in
+	}
+	if in.Trigger != nil {
+		in, out := &in.Trigger, &out.Trigger
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.View != nil {
+		in, out := &in.View, &out.View
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceObjectTransferSettingsParameters.
+func (in *PostgresSourceObjectTransferSettingsParameters) DeepCopy() *PostgresSourceObjectTransferSettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceObjectTransferSettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceObservation) DeepCopyInto(out *PostgresSourceObservation) {
+	*out = *in
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]PostgresSourceConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ObjectTransferSettings != nil {
+		in, out := &in.ObjectTransferSettings, &out.ObjectTransferSettings
+		*out = make([]PostgresSourceObjectTransferSettingsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]PostgresSourcePasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ServiceSchema != nil {
+		in, out := &in.ServiceSchema, &out.ServiceSchema
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlotGigabyteLagLimit != nil {
+		in, out := &in.SlotGigabyteLagLimit, &out.SlotGigabyteLagLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceObservation.
+func (in *PostgresSourceObservation) DeepCopy() *PostgresSourceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourceParameters) DeepCopyInto(out *PostgresSourceParameters) {
+	*out = *in
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]PostgresSourceConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ObjectTransferSettings != nil {
+		in, out := &in.ObjectTransferSettings, &out.ObjectTransferSettings
+		*out = make([]PostgresSourceObjectTransferSettingsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]PostgresSourcePasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceSchema != nil {
+		in, out := &in.ServiceSchema, &out.ServiceSchema
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlotGigabyteLagLimit != nil {
+		in, out := &in.SlotGigabyteLagLimit, &out.SlotGigabyteLagLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourceParameters.
+func (in *PostgresSourceParameters) DeepCopy() *PostgresSourceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourcePasswordInitParameters) DeepCopyInto(out *PostgresSourcePasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourcePasswordInitParameters.
+func (in *PostgresSourcePasswordInitParameters) DeepCopy() *PostgresSourcePasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourcePasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourcePasswordObservation) DeepCopyInto(out *PostgresSourcePasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourcePasswordObservation.
+func (in *PostgresSourcePasswordObservation) DeepCopy() *PostgresSourcePasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourcePasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSourcePasswordParameters) DeepCopyInto(out *PostgresSourcePasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSourcePasswordParameters.
+func (in *PostgresSourcePasswordParameters) DeepCopy() *PostgresSourcePasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSourcePasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionInitParameters) DeepCopyInto(out *PostgresTargetConnectionInitParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]PostgresTargetConnectionOnPremiseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionInitParameters.
+func (in *PostgresTargetConnectionInitParameters) DeepCopy() *PostgresTargetConnectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionObservation) DeepCopyInto(out *PostgresTargetConnectionObservation) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]PostgresTargetConnectionOnPremiseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionObservation.
+func (in *PostgresTargetConnectionObservation) DeepCopy() *PostgresTargetConnectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseInitParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseInitParameters.
+func (in *PostgresTargetConnectionOnPremiseInitParameters) DeepCopy() *PostgresTargetConnectionOnPremiseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseObservation) DeepCopyInto(out *PostgresTargetConnectionOnPremiseObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseObservation.
+func (in *PostgresTargetConnectionOnPremiseObservation) DeepCopy() *PostgresTargetConnectionOnPremiseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSMode != nil {
+		in, out := &in.TLSMode, &out.TLSMode
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseParameters.
+func (in *PostgresTargetConnectionOnPremiseParameters) DeepCopy() *PostgresTargetConnectionOnPremiseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters.
+func (in *PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeDisabledObservation) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeDisabledObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeDisabledObservation.
+func (in *PostgresTargetConnectionOnPremiseTLSModeDisabledObservation) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeDisabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeDisabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeDisabledParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeDisabledParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeDisabledParameters.
+func (in *PostgresTargetConnectionOnPremiseTLSModeDisabledParameters) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeDisabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeDisabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters.
+func (in *PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeEnabledObservation) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeEnabledObservation) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeEnabledObservation.
+func (in *PostgresTargetConnectionOnPremiseTLSModeEnabledObservation) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeEnabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeEnabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeEnabledParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeEnabledParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeEnabledParameters.
+func (in *PostgresTargetConnectionOnPremiseTLSModeEnabledParameters) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeEnabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeEnabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeInitParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeDisabledInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeEnabledInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeInitParameters.
+func (in *PostgresTargetConnectionOnPremiseTLSModeInitParameters) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeObservation) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeEnabledObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeObservation.
+func (in *PostgresTargetConnectionOnPremiseTLSModeObservation) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionOnPremiseTLSModeParameters) DeepCopyInto(out *PostgresTargetConnectionOnPremiseTLSModeParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeDisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]PostgresTargetConnectionOnPremiseTLSModeEnabledParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionOnPremiseTLSModeParameters.
+func (in *PostgresTargetConnectionOnPremiseTLSModeParameters) DeepCopy() *PostgresTargetConnectionOnPremiseTLSModeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionOnPremiseTLSModeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetConnectionParameters) DeepCopyInto(out *PostgresTargetConnectionParameters) {
+	*out = *in
+	if in.MdbClusterID != nil {
+		in, out := &in.MdbClusterID, &out.MdbClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MdbClusterIDRef != nil {
+		in, out := &in.MdbClusterIDRef, &out.MdbClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MdbClusterIDSelector != nil {
+		in, out := &in.MdbClusterIDSelector, &out.MdbClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OnPremise != nil {
+		in, out := &in.OnPremise, &out.OnPremise
+		*out = make([]PostgresTargetConnectionOnPremiseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetConnectionParameters.
+func (in *PostgresTargetConnectionParameters) DeepCopy() *PostgresTargetConnectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetConnectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetInitParameters) DeepCopyInto(out *PostgresTargetInitParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]PostgresTargetConnectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]PostgresTargetPasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetInitParameters.
+func (in *PostgresTargetInitParameters) DeepCopy() *PostgresTargetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetObservation) DeepCopyInto(out *PostgresTargetObservation) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]PostgresTargetConnectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]PostgresTargetPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetObservation.
+func (in *PostgresTargetObservation) DeepCopy() *PostgresTargetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetParameters) DeepCopyInto(out *PostgresTargetParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.Connection != nil {
+		in, out := &in.Connection, &out.Connection
+		*out = make([]PostgresTargetConnectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseRef != nil {
+		in, out := &in.DatabaseRef, &out.DatabaseRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseSelector != nil {
+		in, out := &in.DatabaseSelector, &out.DatabaseSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]PostgresTargetPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupsRefs != nil {
+		in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupsSelector != nil {
+		in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+	if in.UserRef != nil {
+		in, out := &in.UserRef, &out.UserRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UserSelector != nil {
+		in, out := &in.UserSelector, &out.UserSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetParameters.
+func (in *PostgresTargetParameters) DeepCopy() *PostgresTargetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetPasswordInitParameters) DeepCopyInto(out *PostgresTargetPasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetPasswordInitParameters.
+func (in *PostgresTargetPasswordInitParameters) DeepCopy() *PostgresTargetPasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetPasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetPasswordObservation) DeepCopyInto(out *PostgresTargetPasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetPasswordObservation.
+func (in *PostgresTargetPasswordObservation) DeepCopy() *PostgresTargetPasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetPasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresTargetPasswordParameters) DeepCopyInto(out *PostgresTargetPasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTargetPasswordParameters.
+func (in *PostgresTargetPasswordParameters) DeepCopy() *PostgresTargetPasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresTargetPasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RenameTablesInitParameters) DeepCopyInto(out *RenameTablesInitParameters) {
+	*out = *in
+	if in.RenameTables != nil {
+		in, out := &in.RenameTables, &out.RenameTables
+		*out = make([]RenameTablesRenameTablesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenameTablesInitParameters.
+func (in *RenameTablesInitParameters) DeepCopy() *RenameTablesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RenameTablesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RenameTablesObservation) DeepCopyInto(out *RenameTablesObservation) {
+	*out = *in
+	if in.RenameTables != nil {
+		in, out := &in.RenameTables, &out.RenameTables
+		*out = make([]RenameTablesRenameTablesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenameTablesObservation.
+func (in *RenameTablesObservation) DeepCopy() *RenameTablesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RenameTablesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RenameTablesParameters) DeepCopyInto(out *RenameTablesParameters) {
+	*out = *in
+	if in.RenameTables != nil {
+		in, out := &in.RenameTables, &out.RenameTables
+		*out = make([]RenameTablesRenameTablesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenameTablesParameters.
+func (in *RenameTablesParameters) DeepCopy() *RenameTablesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RenameTablesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RenameTablesRenameTablesInitParameters) DeepCopyInto(out *RenameTablesRenameTablesInitParameters) {
+	*out = *in
+	if in.NewName != nil {
+		in, out := &in.NewName, &out.NewName
+		*out = make([]NewNameInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OriginalName != nil {
+		in, out := &in.OriginalName, &out.OriginalName
+		*out = make([]OriginalNameInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenameTablesRenameTablesInitParameters.
+func (in *RenameTablesRenameTablesInitParameters) DeepCopy() *RenameTablesRenameTablesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RenameTablesRenameTablesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RenameTablesRenameTablesObservation) DeepCopyInto(out *RenameTablesRenameTablesObservation) {
+	*out = *in
+	if in.NewName != nil {
+		in, out := &in.NewName, &out.NewName
+		*out = make([]NewNameObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OriginalName != nil {
+		in, out := &in.OriginalName, &out.OriginalName
+		*out = make([]OriginalNameObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenameTablesRenameTablesObservation.
+func (in *RenameTablesRenameTablesObservation) DeepCopy() *RenameTablesRenameTablesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RenameTablesRenameTablesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RenameTablesRenameTablesParameters) DeepCopyInto(out *RenameTablesRenameTablesParameters) {
+	*out = *in
+	if in.NewName != nil {
+		in, out := &in.NewName, &out.NewName
+		*out = make([]NewNameParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OriginalName != nil {
+		in, out := &in.OriginalName, &out.OriginalName
+		*out = make([]OriginalNameParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenameTablesRenameTablesParameters.
+func (in *RenameTablesRenameTablesParameters) DeepCopy() *RenameTablesRenameTablesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RenameTablesRenameTablesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplacePrimaryKeyInitParameters) DeepCopyInto(out *ReplacePrimaryKeyInitParameters) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]ReplacePrimaryKeyTablesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrimaryKeyInitParameters.
+func (in *ReplacePrimaryKeyInitParameters) DeepCopy() *ReplacePrimaryKeyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplacePrimaryKeyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplacePrimaryKeyObservation) DeepCopyInto(out *ReplacePrimaryKeyObservation) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]ReplacePrimaryKeyTablesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrimaryKeyObservation.
+func (in *ReplacePrimaryKeyObservation) DeepCopy() *ReplacePrimaryKeyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplacePrimaryKeyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplacePrimaryKeyParameters) DeepCopyInto(out *ReplacePrimaryKeyParameters) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]ReplacePrimaryKeyTablesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrimaryKeyParameters.
+func (in *ReplacePrimaryKeyParameters) DeepCopy() *ReplacePrimaryKeyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplacePrimaryKeyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplacePrimaryKeyTablesInitParameters) DeepCopyInto(out *ReplacePrimaryKeyTablesInitParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrimaryKeyTablesInitParameters.
+func (in *ReplacePrimaryKeyTablesInitParameters) DeepCopy() *ReplacePrimaryKeyTablesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplacePrimaryKeyTablesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplacePrimaryKeyTablesObservation) DeepCopyInto(out *ReplacePrimaryKeyTablesObservation) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrimaryKeyTablesObservation.
+func (in *ReplacePrimaryKeyTablesObservation) DeepCopy() *ReplacePrimaryKeyTablesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplacePrimaryKeyTablesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplacePrimaryKeyTablesParameters) DeepCopyInto(out *ReplacePrimaryKeyTablesParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrimaryKeyTablesParameters.
+func (in *ReplacePrimaryKeyTablesParameters) DeepCopy() *ReplacePrimaryKeyTablesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplacePrimaryKeyTablesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoundRobinInitParameters) DeepCopyInto(out *RoundRobinInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoundRobinInitParameters.
+func (in *RoundRobinInitParameters) DeepCopy() *RoundRobinInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RoundRobinInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoundRobinObservation) DeepCopyInto(out *RoundRobinObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoundRobinObservation.
+func (in *RoundRobinObservation) DeepCopy() *RoundRobinObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RoundRobinObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoundRobinParameters) DeepCopyInto(out *RoundRobinParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoundRobinParameters.
+func (in *RoundRobinParameters) DeepCopy() *RoundRobinParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RoundRobinParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuntimeInitParameters) DeepCopyInto(out *RuntimeInitParameters) {
+	*out = *in
+	if in.YcRuntime != nil {
+		in, out := &in.YcRuntime, &out.YcRuntime
+		*out = make([]YcRuntimeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeInitParameters.
+func (in *RuntimeInitParameters) DeepCopy() *RuntimeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RuntimeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuntimeObservation) DeepCopyInto(out *RuntimeObservation) {
+	*out = *in
+	if in.YcRuntime != nil {
+		in, out := &in.YcRuntime, &out.YcRuntime
+		*out = make([]YcRuntimeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeObservation.
+func (in *RuntimeObservation) DeepCopy() *RuntimeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RuntimeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuntimeParameters) DeepCopyInto(out *RuntimeParameters) {
+	*out = *in
+	if in.YcRuntime != nil {
+		in, out := &in.YcRuntime, &out.YcRuntime
+		*out = make([]YcRuntimeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeParameters.
+func (in *RuntimeParameters) DeepCopy() *RuntimeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RuntimeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SaslInitParameters) DeepCopyInto(out *SaslInitParameters) {
+	*out = *in
+	if in.Mechanism != nil {
+		in, out := &in.Mechanism, &out.Mechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]SaslPasswordInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslInitParameters.
+func (in *SaslInitParameters) DeepCopy() *SaslInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SaslInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SaslObservation) DeepCopyInto(out *SaslObservation) {
+	*out = *in
+	if in.Mechanism != nil {
+		in, out := &in.Mechanism, &out.Mechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]SaslPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslObservation.
+func (in *SaslObservation) DeepCopy() *SaslObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SaslObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SaslParameters) DeepCopyInto(out *SaslParameters) {
+	*out = *in
+	if in.Mechanism != nil {
+		in, out := &in.Mechanism, &out.Mechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = make([]SaslPasswordParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslParameters.
+func (in *SaslParameters) DeepCopy() *SaslParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SaslParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SaslPasswordInitParameters) DeepCopyInto(out *SaslPasswordInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslPasswordInitParameters.
+func (in *SaslPasswordInitParameters) DeepCopy() *SaslPasswordInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SaslPasswordInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SaslPasswordObservation) DeepCopyInto(out *SaslPasswordObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslPasswordObservation.
+func (in *SaslPasswordObservation) DeepCopy() *SaslPasswordObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SaslPasswordObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SaslPasswordParameters) DeepCopyInto(out *SaslPasswordParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslPasswordParameters.
+func (in *SaslPasswordParameters) DeepCopy() *SaslPasswordParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SaslPasswordParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerAutoInitParameters) DeepCopyInto(out *SerializerAutoInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerAutoInitParameters.
+func (in *SerializerAutoInitParameters) DeepCopy() *SerializerAutoInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerAutoInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerAutoObservation) DeepCopyInto(out *SerializerAutoObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerAutoObservation.
+func (in *SerializerAutoObservation) DeepCopy() *SerializerAutoObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerAutoObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerAutoParameters) DeepCopyInto(out *SerializerAutoParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerAutoParameters.
+func (in *SerializerAutoParameters) DeepCopy() *SerializerAutoParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerAutoParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerDebeziumInitParameters) DeepCopyInto(out *SerializerDebeziumInitParameters) {
+	*out = *in
+	if in.SerializerParameters != nil {
+		in, out := &in.SerializerParameters, &out.SerializerParameters
+		*out = make([]SerializerParametersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerDebeziumInitParameters.
+func (in *SerializerDebeziumInitParameters) DeepCopy() *SerializerDebeziumInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerDebeziumInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerDebeziumObservation) DeepCopyInto(out *SerializerDebeziumObservation) {
+	*out = *in
+	if in.SerializerParameters != nil {
+		in, out := &in.SerializerParameters, &out.SerializerParameters
+		*out = make([]SerializerParametersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerDebeziumObservation.
+func (in *SerializerDebeziumObservation) DeepCopy() *SerializerDebeziumObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerDebeziumObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerDebeziumParameters) DeepCopyInto(out *SerializerDebeziumParameters) {
+	*out = *in
+	if in.SerializerParameters != nil {
+		in, out := &in.SerializerParameters, &out.SerializerParameters
+		*out = make([]SerializerParametersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerDebeziumParameters.
+func (in *SerializerDebeziumParameters) DeepCopy() *SerializerDebeziumParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerDebeziumParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerDebeziumSerializerParametersInitParameters) DeepCopyInto(out *SerializerDebeziumSerializerParametersInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerDebeziumSerializerParametersInitParameters.
+func (in *SerializerDebeziumSerializerParametersInitParameters) DeepCopy() *SerializerDebeziumSerializerParametersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerDebeziumSerializerParametersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerDebeziumSerializerParametersObservation) DeepCopyInto(out *SerializerDebeziumSerializerParametersObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerDebeziumSerializerParametersObservation.
+func (in *SerializerDebeziumSerializerParametersObservation) DeepCopy() *SerializerDebeziumSerializerParametersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerDebeziumSerializerParametersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerDebeziumSerializerParametersParameters) DeepCopyInto(out *SerializerDebeziumSerializerParametersParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerDebeziumSerializerParametersParameters.
+func (in *SerializerDebeziumSerializerParametersParameters) DeepCopy() *SerializerDebeziumSerializerParametersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerDebeziumSerializerParametersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerInitParameters) DeepCopyInto(out *SerializerInitParameters) {
+	*out = *in
+	if in.SerializerAuto != nil {
+		in, out := &in.SerializerAuto, &out.SerializerAuto
+		*out = make([]SerializerAutoInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.SerializerDebezium != nil {
+		in, out := &in.SerializerDebezium, &out.SerializerDebezium
+		*out = make([]SerializerDebeziumInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SerializerJSON != nil {
+		in, out := &in.SerializerJSON, &out.SerializerJSON
+		*out = make([]SerializerJSONInitParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerInitParameters.
+func (in *SerializerInitParameters) DeepCopy() *SerializerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerJSONInitParameters) DeepCopyInto(out *SerializerJSONInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerJSONInitParameters.
+func (in *SerializerJSONInitParameters) DeepCopy() *SerializerJSONInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerJSONInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerJSONObservation) DeepCopyInto(out *SerializerJSONObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerJSONObservation.
+func (in *SerializerJSONObservation) DeepCopy() *SerializerJSONObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerJSONObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerJSONParameters) DeepCopyInto(out *SerializerJSONParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerJSONParameters.
+func (in *SerializerJSONParameters) DeepCopy() *SerializerJSONParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerJSONParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerObservation) DeepCopyInto(out *SerializerObservation) {
+	*out = *in
+	if in.SerializerAuto != nil {
+		in, out := &in.SerializerAuto, &out.SerializerAuto
+		*out = make([]SerializerAutoParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.SerializerDebezium != nil {
+		in, out := &in.SerializerDebezium, &out.SerializerDebezium
+		*out = make([]SerializerDebeziumObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SerializerJSON != nil {
+		in, out := &in.SerializerJSON, &out.SerializerJSON
+		*out = make([]SerializerJSONParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerObservation.
+func (in *SerializerObservation) DeepCopy() *SerializerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerParameters) DeepCopyInto(out *SerializerParameters) {
+	*out = *in
+	if in.SerializerAuto != nil {
+		in, out := &in.SerializerAuto, &out.SerializerAuto
+		*out = make([]SerializerAutoParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.SerializerDebezium != nil {
+		in, out := &in.SerializerDebezium, &out.SerializerDebezium
+		*out = make([]SerializerDebeziumParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SerializerJSON != nil {
+		in, out := &in.SerializerJSON, &out.SerializerJSON
+		*out = make([]SerializerJSONParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerParameters.
+func (in *SerializerParameters) DeepCopy() *SerializerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerParametersInitParameters) DeepCopyInto(out *SerializerParametersInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerParametersInitParameters.
+func (in *SerializerParametersInitParameters) DeepCopy() *SerializerParametersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerParametersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerParametersObservation) DeepCopyInto(out *SerializerParametersObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerParametersObservation.
+func (in *SerializerParametersObservation) DeepCopy() *SerializerParametersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerParametersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerParametersParameters) DeepCopyInto(out *SerializerParametersParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerParametersParameters.
+func (in *SerializerParametersParameters) DeepCopy() *SerializerParametersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerParametersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerAutoInitParameters) DeepCopyInto(out *SerializerSerializerAutoInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerAutoInitParameters.
+func (in *SerializerSerializerAutoInitParameters) DeepCopy() *SerializerSerializerAutoInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerAutoInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerAutoObservation) DeepCopyInto(out *SerializerSerializerAutoObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerAutoObservation.
+func (in *SerializerSerializerAutoObservation) DeepCopy() *SerializerSerializerAutoObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerAutoObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerAutoParameters) DeepCopyInto(out *SerializerSerializerAutoParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerAutoParameters.
+func (in *SerializerSerializerAutoParameters) DeepCopy() *SerializerSerializerAutoParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerAutoParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerDebeziumInitParameters) DeepCopyInto(out *SerializerSerializerDebeziumInitParameters) {
+	*out = *in
+	if in.SerializerParameters != nil {
+		in, out := &in.SerializerParameters, &out.SerializerParameters
+		*out = make([]SerializerDebeziumSerializerParametersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerDebeziumInitParameters.
+func (in *SerializerSerializerDebeziumInitParameters) DeepCopy() *SerializerSerializerDebeziumInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerDebeziumInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerDebeziumObservation) DeepCopyInto(out *SerializerSerializerDebeziumObservation) {
+	*out = *in
+	if in.SerializerParameters != nil {
+		in, out := &in.SerializerParameters, &out.SerializerParameters
+		*out = make([]SerializerDebeziumSerializerParametersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerDebeziumObservation.
+func (in *SerializerSerializerDebeziumObservation) DeepCopy() *SerializerSerializerDebeziumObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerDebeziumObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerDebeziumParameters) DeepCopyInto(out *SerializerSerializerDebeziumParameters) {
+	*out = *in
+	if in.SerializerParameters != nil {
+		in, out := &in.SerializerParameters, &out.SerializerParameters
+		*out = make([]SerializerDebeziumSerializerParametersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerDebeziumParameters.
+func (in *SerializerSerializerDebeziumParameters) DeepCopy() *SerializerSerializerDebeziumParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerDebeziumParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerJSONInitParameters) DeepCopyInto(out *SerializerSerializerJSONInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerJSONInitParameters.
+func (in *SerializerSerializerJSONInitParameters) DeepCopy() *SerializerSerializerJSONInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerJSONInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerJSONObservation) DeepCopyInto(out *SerializerSerializerJSONObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerJSONObservation.
+func (in *SerializerSerializerJSONObservation) DeepCopy() *SerializerSerializerJSONObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerJSONObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializerSerializerJSONParameters) DeepCopyInto(out *SerializerSerializerJSONParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerSerializerJSONParameters.
+func (in *SerializerSerializerJSONParameters) DeepCopy() *SerializerSerializerJSONParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializerSerializerJSONParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) {
+	*out = *in
+	if in.ClickhouseSource != nil {
+		in, out := &in.ClickhouseSource, &out.ClickhouseSource
+		*out = make([]ClickhouseSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClickhouseTarget != nil {
+		in, out := &in.ClickhouseTarget, &out.ClickhouseTarget
+		*out = make([]ClickhouseTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaSource != nil {
+		in, out := &in.KafkaSource, &out.KafkaSource
+		*out = make([]KafkaSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaTarget != nil {
+		in, out := &in.KafkaTarget, &out.KafkaTarget
+		*out = make([]KafkaTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MetrikaSource != nil {
+		in, out := &in.MetrikaSource, &out.MetrikaSource
+		*out = make([]MetrikaSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MongoSource != nil {
+		in, out := &in.MongoSource, &out.MongoSource
+		*out = make([]MongoSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MongoTarget != nil {
+		in, out := &in.MongoTarget, &out.MongoTarget
+		*out = make([]MongoTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLSource != nil {
+		in, out := &in.MySQLSource, &out.MySQLSource
+		*out = make([]MySQLSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLTarget != nil {
+		in, out := &in.MySQLTarget, &out.MySQLTarget
+		*out = make([]MySQLTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresSource != nil {
+		in, out := &in.PostgresSource, &out.PostgresSource
+		*out = make([]PostgresSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresTarget != nil {
+		in, out := &in.PostgresTarget, &out.PostgresTarget
+		*out = make([]PostgresTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdbSource != nil {
+		in, out := &in.YdbSource, &out.YdbSource
+		*out = make([]YdbSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdbTarget != nil {
+		in, out := &in.YdbTarget, &out.YdbTarget
+		*out = make([]YdbTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdsSource != nil {
+		in, out := &in.YdsSource, &out.YdsSource
+		*out = make([]YdsSourceInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdsTarget != nil {
+		in, out := &in.YdsTarget, &out.YdsTarget
+		*out = make([]YdsTargetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters.
+func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) {
+	*out = *in
+	if in.ClickhouseSource != nil {
+		in, out := &in.ClickhouseSource, &out.ClickhouseSource
+		*out = make([]ClickhouseSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClickhouseTarget != nil {
+		in, out := &in.ClickhouseTarget, &out.ClickhouseTarget
+		*out = make([]ClickhouseTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaSource != nil {
+		in, out := &in.KafkaSource, &out.KafkaSource
+		*out = make([]KafkaSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaTarget != nil {
+		in, out := &in.KafkaTarget, &out.KafkaTarget
+		*out = make([]KafkaTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MetrikaSource != nil {
+		in, out := &in.MetrikaSource, &out.MetrikaSource
+		*out = make([]MetrikaSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MongoSource != nil {
+		in, out := &in.MongoSource, &out.MongoSource
+		*out = make([]MongoSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MongoTarget != nil {
+		in, out := &in.MongoTarget, &out.MongoTarget
+		*out = make([]MongoTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLSource != nil {
+		in, out := &in.MySQLSource, &out.MySQLSource
+		*out = make([]MySQLSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLTarget != nil {
+		in, out := &in.MySQLTarget, &out.MySQLTarget
+		*out = make([]MySQLTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresSource != nil {
+		in, out := &in.PostgresSource, &out.PostgresSource
+		*out = make([]PostgresSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresTarget != nil {
+		in, out := &in.PostgresTarget, &out.PostgresTarget
+		*out = make([]PostgresTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdbSource != nil {
+		in, out := &in.YdbSource, &out.YdbSource
+		*out = make([]YdbSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdbTarget != nil {
+		in, out := &in.YdbTarget, &out.YdbTarget
+		*out = make([]YdbTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdsSource != nil {
+		in, out := &in.YdsSource, &out.YdsSource
+		*out = make([]YdsSourceObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdsTarget != nil {
+		in, out := &in.YdsTarget, &out.YdsTarget
+		*out = make([]YdsTargetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation.
+func (in *SettingsObservation) DeepCopy() *SettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) {
+	*out = *in
+	if in.ClickhouseSource != nil {
+		in, out := &in.ClickhouseSource, &out.ClickhouseSource
+		*out = make([]ClickhouseSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClickhouseTarget != nil {
+		in, out := &in.ClickhouseTarget, &out.ClickhouseTarget
+		*out = make([]ClickhouseTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaSource != nil {
+		in, out := &in.KafkaSource, &out.KafkaSource
+		*out = make([]KafkaSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaTarget != nil {
+		in, out := &in.KafkaTarget, &out.KafkaTarget
+		*out = make([]KafkaTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MetrikaSource != nil {
+		in, out := &in.MetrikaSource, &out.MetrikaSource
+		*out = make([]MetrikaSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MongoSource != nil {
+		in, out := &in.MongoSource, &out.MongoSource
+		*out = make([]MongoSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MongoTarget != nil {
+		in, out := &in.MongoTarget, &out.MongoTarget
+		*out = make([]MongoTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLSource != nil {
+		in, out := &in.MySQLSource, &out.MySQLSource
+		*out = make([]MySQLSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLTarget != nil {
+		in, out := &in.MySQLTarget, &out.MySQLTarget
+		*out = make([]MySQLTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresSource != nil {
+		in, out := &in.PostgresSource, &out.PostgresSource
+		*out = make([]PostgresSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresTarget != nil {
+		in, out := &in.PostgresTarget, &out.PostgresTarget
+		*out = make([]PostgresTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdbSource != nil {
+		in, out := &in.YdbSource, &out.YdbSource
+		*out = make([]YdbSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdbTarget != nil {
+		in, out := &in.YdbTarget, &out.YdbTarget
+		*out = make([]YdbTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdsSource != nil {
+		in, out := &in.YdsSource, &out.YdsSource
+		*out = make([]YdsSourceParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.YdsTarget != nil {
+		in, out := &in.YdsTarget, &out.YdsTarget
+		*out = make([]YdsTargetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters.
+func (in *SettingsParameters) DeepCopy() *SettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerColumnsInitParameters) DeepCopyInto(out *SharderTransformerColumnsInitParameters) {
+	*out = *in
+	if in.ExcludeColumns != nil {
+		in, out := &in.ExcludeColumns, &out.ExcludeColumns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeColumns != nil {
+		in, out := &in.IncludeColumns, &out.IncludeColumns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerColumnsInitParameters.
+func (in *SharderTransformerColumnsInitParameters) DeepCopy() *SharderTransformerColumnsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerColumnsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerColumnsObservation) DeepCopyInto(out *SharderTransformerColumnsObservation) {
+	*out = *in
+	if in.ExcludeColumns != nil {
+		in, out := &in.ExcludeColumns, &out.ExcludeColumns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeColumns != nil {
+		in, out := &in.IncludeColumns, &out.IncludeColumns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerColumnsObservation.
+func (in *SharderTransformerColumnsObservation) DeepCopy() *SharderTransformerColumnsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerColumnsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerColumnsParameters) DeepCopyInto(out *SharderTransformerColumnsParameters) {
+	*out = *in
+	if in.ExcludeColumns != nil {
+		in, out := &in.ExcludeColumns, &out.ExcludeColumns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeColumns != nil {
+		in, out := &in.IncludeColumns, &out.IncludeColumns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerColumnsParameters.
+func (in *SharderTransformerColumnsParameters) DeepCopy() *SharderTransformerColumnsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerColumnsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerInitParameters) DeepCopyInto(out *SharderTransformerInitParameters) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]SharderTransformerColumnsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ShardsCount != nil {
+		in, out := &in.ShardsCount, &out.ShardsCount
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]SharderTransformerTablesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerInitParameters.
+func (in *SharderTransformerInitParameters) DeepCopy() *SharderTransformerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerObservation) DeepCopyInto(out *SharderTransformerObservation) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]SharderTransformerColumnsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ShardsCount != nil {
+		in, out := &in.ShardsCount, &out.ShardsCount
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]SharderTransformerTablesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerObservation.
+func (in *SharderTransformerObservation) DeepCopy() *SharderTransformerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerParameters) DeepCopyInto(out *SharderTransformerParameters) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]SharderTransformerColumnsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ShardsCount != nil {
+		in, out := &in.ShardsCount, &out.ShardsCount
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]SharderTransformerTablesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerParameters.
+func (in *SharderTransformerParameters) DeepCopy() *SharderTransformerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerTablesInitParameters) DeepCopyInto(out *SharderTransformerTablesInitParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerTablesInitParameters.
+func (in *SharderTransformerTablesInitParameters) DeepCopy() *SharderTransformerTablesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerTablesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerTablesObservation) DeepCopyInto(out *SharderTransformerTablesObservation) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerTablesObservation.
+func (in *SharderTransformerTablesObservation) DeepCopy() *SharderTransformerTablesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerTablesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharderTransformerTablesParameters) DeepCopyInto(out *SharderTransformerTablesParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharderTransformerTablesParameters.
+func (in *SharderTransformerTablesParameters) DeepCopy() *SharderTransformerTablesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharderTransformerTablesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShardingInitParameters) DeepCopyInto(out *ShardingInitParameters) {
+	*out = *in
+	if in.ColumnValueHash != nil {
+		in, out := &in.ColumnValueHash, &out.ColumnValueHash
+		*out = make([]ColumnValueHashInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CustomMapping != nil {
+		in, out := &in.CustomMapping, &out.CustomMapping
+		*out = make([]CustomMappingInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RoundRobin != nil {
+		in, out := &in.RoundRobin, &out.RoundRobin
+		*out = make([]RoundRobinInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.TransferID != nil {
+		in, out := &in.TransferID, &out.TransferID
+		*out = make([]TransferIDInitParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingInitParameters.
+func (in *ShardingInitParameters) DeepCopy() *ShardingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ShardingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShardingObservation) DeepCopyInto(out *ShardingObservation) {
+	*out = *in
+	if in.ColumnValueHash != nil {
+		in, out := &in.ColumnValueHash, &out.ColumnValueHash
+		*out = make([]ColumnValueHashObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CustomMapping != nil {
+		in, out := &in.CustomMapping, &out.CustomMapping
+		*out = make([]CustomMappingObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RoundRobin != nil {
+		in, out := &in.RoundRobin, &out.RoundRobin
+		*out = make([]RoundRobinParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.TransferID != nil {
+		in, out := &in.TransferID, &out.TransferID
+		*out = make([]TransferIDParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingObservation.
+func (in *ShardingObservation) DeepCopy() *ShardingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ShardingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShardingParameters) DeepCopyInto(out *ShardingParameters) {
+	*out = *in
+	if in.ColumnValueHash != nil {
+		in, out := &in.ColumnValueHash, &out.ColumnValueHash
+		*out = make([]ColumnValueHashParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CustomMapping != nil {
+		in, out := &in.CustomMapping, &out.CustomMapping
+		*out = make([]CustomMappingParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RoundRobin != nil {
+		in, out := &in.RoundRobin, &out.RoundRobin
+		*out = make([]RoundRobinParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.TransferID != nil {
+		in, out := &in.TransferID, &out.TransferID
+		*out = make([]TransferIDParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingParameters.
+func (in *ShardingParameters) DeepCopy() *ShardingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ShardingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShardsInitParameters) DeepCopyInto(out *ShardsInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardsInitParameters.
+func (in *ShardsInitParameters) DeepCopy() *ShardsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ShardsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShardsObservation) DeepCopyInto(out *ShardsObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardsObservation.
+func (in *ShardsObservation) DeepCopy() *ShardsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ShardsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShardsParameters) DeepCopyInto(out *ShardsParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardsParameters.
+func (in *ShardsParameters) DeepCopy() *ShardsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ShardsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamsInitParameters) DeepCopyInto(out *StreamsInitParameters) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamsInitParameters.
+func (in *StreamsInitParameters) DeepCopy() *StreamsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamsObservation) DeepCopyInto(out *StreamsObservation) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamsObservation.
+func (in *StreamsObservation) DeepCopy() *StreamsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StreamsParameters) DeepCopyInto(out *StreamsParameters) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamsParameters.
+func (in *StreamsParameters) DeepCopy() *StreamsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StreamsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeDisabledInitParameters) DeepCopyInto(out *TLSModeDisabledInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeDisabledInitParameters.
+func (in *TLSModeDisabledInitParameters) DeepCopy() *TLSModeDisabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeDisabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeDisabledObservation) DeepCopyInto(out *TLSModeDisabledObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeDisabledObservation.
+func (in *TLSModeDisabledObservation) DeepCopy() *TLSModeDisabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeDisabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeDisabledParameters) DeepCopyInto(out *TLSModeDisabledParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeDisabledParameters.
+func (in *TLSModeDisabledParameters) DeepCopy() *TLSModeDisabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeDisabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeEnabledInitParameters) DeepCopyInto(out *TLSModeEnabledInitParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeEnabledInitParameters.
+func (in *TLSModeEnabledInitParameters) DeepCopy() *TLSModeEnabledInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeEnabledInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeEnabledObservation) DeepCopyInto(out *TLSModeEnabledObservation) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeEnabledObservation.
+func (in *TLSModeEnabledObservation) DeepCopy() *TLSModeEnabledObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeEnabledObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeEnabledParameters) DeepCopyInto(out *TLSModeEnabledParameters) {
+	*out = *in
+	if in.CACertificate != nil {
+		in, out := &in.CACertificate, &out.CACertificate
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeEnabledParameters.
+func (in *TLSModeEnabledParameters) DeepCopy() *TLSModeEnabledParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeEnabledParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeInitParameters) DeepCopyInto(out *TLSModeInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]DisabledInitParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]EnabledInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeInitParameters.
+func (in *TLSModeInitParameters) DeepCopy() *TLSModeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeObservation) DeepCopyInto(out *TLSModeObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]DisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]EnabledObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeObservation.
+func (in *TLSModeObservation) DeepCopy() *TLSModeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSModeParameters) DeepCopyInto(out *TLSModeParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]DisabledParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]EnabledParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSModeParameters.
+func (in *TLSModeParameters) DeepCopy() *TLSModeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSModeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableSplitterTransformerInitParameters) DeepCopyInto(out *TableSplitterTransformerInitParameters) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Splitter != nil {
+		in, out := &in.Splitter, &out.Splitter
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]TableSplitterTransformerTablesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSplitterTransformerInitParameters.
+func (in *TableSplitterTransformerInitParameters) DeepCopy() *TableSplitterTransformerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TableSplitterTransformerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableSplitterTransformerObservation) DeepCopyInto(out *TableSplitterTransformerObservation) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Splitter != nil {
+		in, out := &in.Splitter, &out.Splitter
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]TableSplitterTransformerTablesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSplitterTransformerObservation.
+func (in *TableSplitterTransformerObservation) DeepCopy() *TableSplitterTransformerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TableSplitterTransformerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableSplitterTransformerParameters) DeepCopyInto(out *TableSplitterTransformerParameters) {
+	*out = *in
+	if in.Columns != nil {
+		in, out := &in.Columns, &out.Columns
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Splitter != nil {
+		in, out := &in.Splitter, &out.Splitter
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tables != nil {
+		in, out := &in.Tables, &out.Tables
+		*out = make([]TableSplitterTransformerTablesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSplitterTransformerParameters.
+func (in *TableSplitterTransformerParameters) DeepCopy() *TableSplitterTransformerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TableSplitterTransformerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableSplitterTransformerTablesInitParameters) DeepCopyInto(out *TableSplitterTransformerTablesInitParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSplitterTransformerTablesInitParameters.
+func (in *TableSplitterTransformerTablesInitParameters) DeepCopy() *TableSplitterTransformerTablesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TableSplitterTransformerTablesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableSplitterTransformerTablesObservation) DeepCopyInto(out *TableSplitterTransformerTablesObservation) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSplitterTransformerTablesObservation.
+func (in *TableSplitterTransformerTablesObservation) DeepCopy() *TableSplitterTransformerTablesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TableSplitterTransformerTablesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableSplitterTransformerTablesParameters) DeepCopyInto(out *TableSplitterTransformerTablesParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSplitterTransformerTablesParameters.
+func (in *TableSplitterTransformerTablesParameters) DeepCopy() *TableSplitterTransformerTablesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TableSplitterTransformerTablesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TablesInitParameters) DeepCopyInto(out *TablesInitParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TablesInitParameters.
+func (in *TablesInitParameters) DeepCopy() *TablesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TablesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TablesObservation) DeepCopyInto(out *TablesObservation) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TablesObservation.
+func (in *TablesObservation) DeepCopy() *TablesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TablesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TablesParameters) DeepCopyInto(out *TablesParameters) {
+	*out = *in
+	if in.ExcludeTables != nil {
+		in, out := &in.ExcludeTables, &out.ExcludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.IncludeTables != nil {
+		in, out := &in.IncludeTables, &out.IncludeTables
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TablesParameters.
+func (in *TablesParameters) DeepCopy() *TablesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TablesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenInitParameters) DeepCopyInto(out *TokenInitParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenInitParameters.
+func (in *TokenInitParameters) DeepCopy() *TokenInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenObservation) DeepCopyInto(out *TokenObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenObservation.
+func (in *TokenObservation) DeepCopy() *TokenObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenParameters) DeepCopyInto(out *TokenParameters) {
+	*out = *in
+	if in.RawSecretRef != nil {
+		in, out := &in.RawSecretRef, &out.RawSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenParameters.
+func (in *TokenParameters) DeepCopy() *TokenParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicInitParameters) DeepCopyInto(out *TopicInitParameters) {
+	*out = *in
+	if in.SaveTxOrder != nil {
+		in, out := &in.SaveTxOrder, &out.SaveTxOrder
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TopicName != nil {
+		in, out := &in.TopicName, &out.TopicName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInitParameters.
+func (in *TopicInitParameters) DeepCopy() *TopicInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TopicInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicObservation) DeepCopyInto(out *TopicObservation) {
+	*out = *in
+	if in.SaveTxOrder != nil {
+		in, out := &in.SaveTxOrder, &out.SaveTxOrder
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TopicName != nil {
+		in, out := &in.TopicName, &out.TopicName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicObservation.
+func (in *TopicObservation) DeepCopy() *TopicObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TopicObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicParameters) DeepCopyInto(out *TopicParameters) {
+	*out = *in
+	if in.SaveTxOrder != nil {
+		in, out := &in.SaveTxOrder, &out.SaveTxOrder
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TopicName != nil {
+		in, out := &in.TopicName, &out.TopicName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicParameters.
+func (in *TopicParameters) DeepCopy() *TopicParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TopicParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicSettingsInitParameters) DeepCopyInto(out *TopicSettingsInitParameters) {
+	*out = *in
+	if in.Topic != nil {
+		in, out := &in.Topic, &out.Topic
+		*out = make([]TopicInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TopicPrefix != nil {
+		in, out := &in.TopicPrefix, &out.TopicPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSettingsInitParameters.
+func (in *TopicSettingsInitParameters) DeepCopy() *TopicSettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TopicSettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicSettingsObservation) DeepCopyInto(out *TopicSettingsObservation) {
+	*out = *in
+	if in.Topic != nil {
+		in, out := &in.Topic, &out.Topic
+		*out = make([]TopicObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TopicPrefix != nil {
+		in, out := &in.TopicPrefix, &out.TopicPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSettingsObservation.
+func (in *TopicSettingsObservation) DeepCopy() *TopicSettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TopicSettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicSettingsParameters) DeepCopyInto(out *TopicSettingsParameters) {
+	*out = *in
+	if in.Topic != nil {
+		in, out := &in.Topic, &out.Topic
+		*out = make([]TopicParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TopicPrefix != nil {
+		in, out := &in.TopicPrefix, &out.TopicPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSettingsParameters.
+func (in *TopicSettingsParameters) DeepCopy() *TopicSettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TopicSettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transfer) DeepCopyInto(out *Transfer) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transfer.
+func (in *Transfer) DeepCopy() *Transfer {
+	if in == nil {
+		return nil
+	}
+	out := new(Transfer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Transfer) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferIDInitParameters) DeepCopyInto(out *TransferIDInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferIDInitParameters.
+func (in *TransferIDInitParameters) DeepCopy() *TransferIDInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferIDInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferIDObservation) DeepCopyInto(out *TransferIDObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferIDObservation.
+func (in *TransferIDObservation) DeepCopy() *TransferIDObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferIDObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferIDParameters) DeepCopyInto(out *TransferIDParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferIDParameters.
+func (in *TransferIDParameters) DeepCopy() *TransferIDParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferIDParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferInitParameters) DeepCopyInto(out *TransferInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnCreateActivateMode != nil {
+		in, out := &in.OnCreateActivateMode, &out.OnCreateActivateMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Runtime != nil {
+		in, out := &in.Runtime, &out.Runtime
+		*out = make([]RuntimeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceID != nil {
+		in, out := &in.SourceID, &out.SourceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceIDRef != nil {
+		in, out := &in.SourceIDRef, &out.SourceIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SourceIDSelector != nil {
+		in, out := &in.SourceIDSelector, &out.SourceIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetID != nil {
+		in, out := &in.TargetID, &out.TargetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetIDRef != nil {
+		in, out := &in.TargetIDRef, &out.TargetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetIDSelector != nil {
+		in, out := &in.TargetIDSelector, &out.TargetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Transformation != nil {
+		in, out := &in.Transformation, &out.Transformation
+		*out = make([]TransformationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferInitParameters.
+func (in *TransferInitParameters) DeepCopy() *TransferInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferList) DeepCopyInto(out *TransferList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Transfer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferList.
+func (in *TransferList) DeepCopy() *TransferList {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TransferList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferObservation) DeepCopyInto(out *TransferObservation) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnCreateActivateMode != nil {
+		in, out := &in.OnCreateActivateMode, &out.OnCreateActivateMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Runtime != nil {
+		in, out := &in.Runtime, &out.Runtime
+		*out = make([]RuntimeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceID != nil {
+		in, out := &in.SourceID, &out.SourceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetID != nil {
+		in, out := &in.TargetID, &out.TargetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Transformation != nil {
+		in, out := &in.Transformation, &out.Transformation
+		*out = make([]TransformationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.Warning != nil {
+		in, out := &in.Warning, &out.Warning
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferObservation.
+func (in *TransferObservation) DeepCopy() *TransferObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferParameters) DeepCopyInto(out *TransferParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OnCreateActivateMode != nil {
+		in, out := &in.OnCreateActivateMode, &out.OnCreateActivateMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Runtime != nil {
+		in, out := &in.Runtime, &out.Runtime
+		*out = make([]RuntimeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceID != nil {
+		in, out := &in.SourceID, &out.SourceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceIDRef != nil {
+		in, out := &in.SourceIDRef, &out.SourceIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SourceIDSelector != nil {
+		in, out := &in.SourceIDSelector, &out.SourceIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetID != nil {
+		in, out := &in.TargetID, &out.TargetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetIDRef != nil {
+		in, out := &in.TargetIDRef, &out.TargetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetIDSelector != nil {
+		in, out := &in.TargetIDSelector, &out.TargetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Transformation != nil {
+		in, out := &in.Transformation, &out.Transformation
+		*out = make([]TransformationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferParameters.
+func (in *TransferParameters) DeepCopy() *TransferParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferSpec) DeepCopyInto(out *TransferSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferSpec.
+func (in *TransferSpec) DeepCopy() *TransferSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransferStatus) DeepCopyInto(out *TransferStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferStatus.
+func (in *TransferStatus) DeepCopy() *TransferStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TransferStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformationInitParameters) DeepCopyInto(out *TransformationInitParameters) {
+	*out = *in
+	if in.Transformers != nil {
+		in, out := &in.Transformers, &out.Transformers
+		*out = make([]TransformersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationInitParameters.
+func (in *TransformationInitParameters) DeepCopy() *TransformationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformationObservation) DeepCopyInto(out *TransformationObservation) {
+	*out = *in
+	if in.Transformers != nil {
+		in, out := &in.Transformers, &out.Transformers
+		*out = make([]TransformersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationObservation.
+func (in *TransformationObservation) DeepCopy() *TransformationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformationParameters) DeepCopyInto(out *TransformationParameters) {
+	*out = *in
+	if in.Transformers != nil {
+		in, out := &in.Transformers, &out.Transformers
+		*out = make([]TransformersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationParameters.
+func (in *TransformationParameters) DeepCopy() *TransformationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformerInitParameters) DeepCopyInto(out *TransformerInitParameters) {
+	*out = *in
+	if in.BufferFlushInterval != nil {
+		in, out := &in.BufferFlushInterval, &out.BufferFlushInterval
+		*out = new(string)
+		**out = **in
+	}
+	if in.BufferSize != nil {
+		in, out := &in.BufferSize, &out.BufferSize
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudFunction != nil {
+		in, out := &in.CloudFunction, &out.CloudFunction
+		*out = new(string)
+		**out = **in
+	}
+	if in.InvocationTimeout != nil {
+		in, out := &in.InvocationTimeout, &out.InvocationTimeout
+		*out = new(string)
+		**out = **in
+	}
+	if in.NumberOfRetries != nil {
+		in, out := &in.NumberOfRetries, &out.NumberOfRetries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformerInitParameters.
+func (in *TransformerInitParameters) DeepCopy() *TransformerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformerObservation) DeepCopyInto(out *TransformerObservation) {
+	*out = *in
+	if in.BufferFlushInterval != nil {
+		in, out := &in.BufferFlushInterval, &out.BufferFlushInterval
+		*out = new(string)
+		**out = **in
+	}
+	if in.BufferSize != nil {
+		in, out := &in.BufferSize, &out.BufferSize
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudFunction != nil {
+		in, out := &in.CloudFunction, &out.CloudFunction
+		*out = new(string)
+		**out = **in
+	}
+	if in.InvocationTimeout != nil {
+		in, out := &in.InvocationTimeout, &out.InvocationTimeout
+		*out = new(string)
+		**out = **in
+	}
+	if in.NumberOfRetries != nil {
+		in, out := &in.NumberOfRetries, &out.NumberOfRetries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformerObservation.
+func (in *TransformerObservation) DeepCopy() *TransformerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformerParameters) DeepCopyInto(out *TransformerParameters) {
+	*out = *in
+	if in.BufferFlushInterval != nil {
+		in, out := &in.BufferFlushInterval, &out.BufferFlushInterval
+		*out = new(string)
+		**out = **in
+	}
+	if in.BufferSize != nil {
+		in, out := &in.BufferSize, &out.BufferSize
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudFunction != nil {
+		in, out := &in.CloudFunction, &out.CloudFunction
+		*out = new(string)
+		**out = **in
+	}
+	if in.InvocationTimeout != nil {
+		in, out := &in.InvocationTimeout, &out.InvocationTimeout
+		*out = new(string)
+		**out = **in
+	}
+	if in.NumberOfRetries != nil {
+		in, out := &in.NumberOfRetries, &out.NumberOfRetries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformerParameters.
+func (in *TransformerParameters) DeepCopy() *TransformerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformersInitParameters) DeepCopyInto(out *TransformersInitParameters) {
+	*out = *in
+	if in.ConvertToString != nil {
+		in, out := &in.ConvertToString, &out.ConvertToString
+		*out = make([]ConvertToStringInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilterColumns != nil {
+		in, out := &in.FilterColumns, &out.FilterColumns
+		*out = make([]FilterColumnsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilterRows != nil {
+		in, out := &in.FilterRows, &out.FilterRows
+		*out = make([]FilterRowsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaskField != nil {
+		in, out := &in.MaskField, &out.MaskField
+		*out = make([]MaskFieldInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RenameTables != nil {
+		in, out := &in.RenameTables, &out.RenameTables
+		*out = make([]RenameTablesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ReplacePrimaryKey != nil {
+		in, out := &in.ReplacePrimaryKey, &out.ReplacePrimaryKey
+		*out = make([]ReplacePrimaryKeyInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SharderTransformer != nil {
+		in, out := &in.SharderTransformer, &out.SharderTransformer
+		*out = make([]SharderTransformerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TableSplitterTransformer != nil {
+		in, out := &in.TableSplitterTransformer, &out.TableSplitterTransformer
+		*out = make([]TableSplitterTransformerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformersInitParameters.
+func (in *TransformersInitParameters) DeepCopy() *TransformersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformersObservation) DeepCopyInto(out *TransformersObservation) {
+	*out = *in
+	if in.ConvertToString != nil {
+		in, out := &in.ConvertToString, &out.ConvertToString
+		*out = make([]ConvertToStringObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilterColumns != nil {
+		in, out := &in.FilterColumns, &out.FilterColumns
+		*out = make([]FilterColumnsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilterRows != nil {
+		in, out := &in.FilterRows, &out.FilterRows
+		*out = make([]FilterRowsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaskField != nil {
+		in, out := &in.MaskField, &out.MaskField
+		*out = make([]MaskFieldObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RenameTables != nil {
+		in, out := &in.RenameTables, &out.RenameTables
+		*out = make([]RenameTablesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ReplacePrimaryKey != nil {
+		in, out := &in.ReplacePrimaryKey, &out.ReplacePrimaryKey
+		*out = make([]ReplacePrimaryKeyObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SharderTransformer != nil {
+		in, out := &in.SharderTransformer, &out.SharderTransformer
+		*out = make([]SharderTransformerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TableSplitterTransformer != nil {
+		in, out := &in.TableSplitterTransformer, &out.TableSplitterTransformer
+		*out = make([]TableSplitterTransformerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformersObservation.
+func (in *TransformersObservation) DeepCopy() *TransformersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransformersParameters) DeepCopyInto(out *TransformersParameters) {
+	*out = *in
+	if in.ConvertToString != nil {
+		in, out := &in.ConvertToString, &out.ConvertToString
+		*out = make([]ConvertToStringParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilterColumns != nil {
+		in, out := &in.FilterColumns, &out.FilterColumns
+		*out = make([]FilterColumnsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FilterRows != nil {
+		in, out := &in.FilterRows, &out.FilterRows
+		*out = make([]FilterRowsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaskField != nil {
+		in, out := &in.MaskField, &out.MaskField
+		*out = make([]MaskFieldParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RenameTables != nil {
+		in, out := &in.RenameTables, &out.RenameTables
+		*out = make([]RenameTablesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ReplacePrimaryKey != nil {
+		in, out := &in.ReplacePrimaryKey, &out.ReplacePrimaryKey
+		*out = make([]ReplacePrimaryKeyParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SharderTransformer != nil {
+		in, out := &in.SharderTransformer, &out.SharderTransformer
+		*out = make([]SharderTransformerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TableSplitterTransformer != nil {
+		in, out := &in.TableSplitterTransformer, &out.TableSplitterTransformer
+		*out = make([]TableSplitterTransformerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformersParameters.
+func (in *TransformersParameters) DeepCopy() *TransformersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransformersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TskvParserDataSchemaFieldsFieldsInitParameters) DeepCopyInto(out *TskvParserDataSchemaFieldsFieldsInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Required != nil {
+		in, out := &in.Required, &out.Required
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaFieldsFieldsInitParameters.
+func (in *TskvParserDataSchemaFieldsFieldsInitParameters) DeepCopy() *TskvParserDataSchemaFieldsFieldsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TskvParserDataSchemaFieldsFieldsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TskvParserDataSchemaFieldsFieldsObservation) DeepCopyInto(out *TskvParserDataSchemaFieldsFieldsObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Required != nil {
+		in, out := &in.Required, &out.Required
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaFieldsFieldsObservation.
+func (in *TskvParserDataSchemaFieldsFieldsObservation) DeepCopy() *TskvParserDataSchemaFieldsFieldsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TskvParserDataSchemaFieldsFieldsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TskvParserDataSchemaFieldsFieldsParameters) DeepCopyInto(out *TskvParserDataSchemaFieldsFieldsParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Required != nil {
+		in, out := &in.Required, &out.Required
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaFieldsFieldsParameters.
+func (in *TskvParserDataSchemaFieldsFieldsParameters) DeepCopy() *TskvParserDataSchemaFieldsFieldsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TskvParserDataSchemaFieldsFieldsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TskvParserDataSchemaFieldsInitParameters) DeepCopyInto(out *TskvParserDataSchemaFieldsInitParameters) {
+	*out = *in
+	if in.Fields != nil {
+		in, out := &in.Fields, &out.Fields
+		*out = make([]TskvParserDataSchemaFieldsFieldsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaFieldsInitParameters.
+func (in *TskvParserDataSchemaFieldsInitParameters) DeepCopy() *TskvParserDataSchemaFieldsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TskvParserDataSchemaFieldsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TskvParserDataSchemaFieldsObservation) DeepCopyInto(out *TskvParserDataSchemaFieldsObservation) {
+	*out = *in
+	if in.Fields != nil {
+		in, out := &in.Fields, &out.Fields
+		*out = make([]TskvParserDataSchemaFieldsFieldsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaFieldsObservation.
+func (in *TskvParserDataSchemaFieldsObservation) DeepCopy() *TskvParserDataSchemaFieldsObservation { + if in == nil { + return nil + } + out := new(TskvParserDataSchemaFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserDataSchemaFieldsParameters) DeepCopyInto(out *TskvParserDataSchemaFieldsParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]TskvParserDataSchemaFieldsFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaFieldsParameters. +func (in *TskvParserDataSchemaFieldsParameters) DeepCopy() *TskvParserDataSchemaFieldsParameters { + if in == nil { + return nil + } + out := new(TskvParserDataSchemaFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserDataSchemaInitParameters) DeepCopyInto(out *TskvParserDataSchemaInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]DataSchemaFieldsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaInitParameters. +func (in *TskvParserDataSchemaInitParameters) DeepCopy() *TskvParserDataSchemaInitParameters { + if in == nil { + return nil + } + out := new(TskvParserDataSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserDataSchemaObservation) DeepCopyInto(out *TskvParserDataSchemaObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]DataSchemaFieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaObservation. +func (in *TskvParserDataSchemaObservation) DeepCopy() *TskvParserDataSchemaObservation { + if in == nil { + return nil + } + out := new(TskvParserDataSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserDataSchemaParameters) DeepCopyInto(out *TskvParserDataSchemaParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]DataSchemaFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JSONFields != nil { + in, out := &in.JSONFields, &out.JSONFields + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserDataSchemaParameters. 
+func (in *TskvParserDataSchemaParameters) DeepCopy() *TskvParserDataSchemaParameters { + if in == nil { + return nil + } + out := new(TskvParserDataSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserInitParameters) DeepCopyInto(out *TskvParserInitParameters) { + *out = *in + if in.AddRestColumn != nil { + in, out := &in.AddRestColumn, &out.AddRestColumn + *out = new(bool) + **out = **in + } + if in.DataSchema != nil { + in, out := &in.DataSchema, &out.DataSchema + *out = make([]TskvParserDataSchemaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NullKeysAllowed != nil { + in, out := &in.NullKeysAllowed, &out.NullKeysAllowed + *out = new(bool) + **out = **in + } + if in.UnescapeStringValues != nil { + in, out := &in.UnescapeStringValues, &out.UnescapeStringValues + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserInitParameters. +func (in *TskvParserInitParameters) DeepCopy() *TskvParserInitParameters { + if in == nil { + return nil + } + out := new(TskvParserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserObservation) DeepCopyInto(out *TskvParserObservation) { + *out = *in + if in.AddRestColumn != nil { + in, out := &in.AddRestColumn, &out.AddRestColumn + *out = new(bool) + **out = **in + } + if in.DataSchema != nil { + in, out := &in.DataSchema, &out.DataSchema + *out = make([]TskvParserDataSchemaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NullKeysAllowed != nil { + in, out := &in.NullKeysAllowed, &out.NullKeysAllowed + *out = new(bool) + **out = **in + } + if in.UnescapeStringValues != nil { + in, out := &in.UnescapeStringValues, &out.UnescapeStringValues + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserObservation. +func (in *TskvParserObservation) DeepCopy() *TskvParserObservation { + if in == nil { + return nil + } + out := new(TskvParserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TskvParserParameters) DeepCopyInto(out *TskvParserParameters) { + *out = *in + if in.AddRestColumn != nil { + in, out := &in.AddRestColumn, &out.AddRestColumn + *out = new(bool) + **out = **in + } + if in.DataSchema != nil { + in, out := &in.DataSchema, &out.DataSchema + *out = make([]TskvParserDataSchemaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NullKeysAllowed != nil { + in, out := &in.NullKeysAllowed, &out.NullKeysAllowed + *out = new(bool) + **out = **in + } + if in.UnescapeStringValues != nil { + in, out := &in.UnescapeStringValues, &out.UnescapeStringValues + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TskvParserParameters. 
+func (in *TskvParserParameters) DeepCopy() *TskvParserParameters { + if in == nil { + return nil + } + out := new(TskvParserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploadShardParamsInitParameters) DeepCopyInto(out *UploadShardParamsInitParameters) { + *out = *in + if in.JobCount != nil { + in, out := &in.JobCount, &out.JobCount + *out = new(float64) + **out = **in + } + if in.ProcessCount != nil { + in, out := &in.ProcessCount, &out.ProcessCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploadShardParamsInitParameters. +func (in *UploadShardParamsInitParameters) DeepCopy() *UploadShardParamsInitParameters { + if in == nil { + return nil + } + out := new(UploadShardParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploadShardParamsObservation) DeepCopyInto(out *UploadShardParamsObservation) { + *out = *in + if in.JobCount != nil { + in, out := &in.JobCount, &out.JobCount + *out = new(float64) + **out = **in + } + if in.ProcessCount != nil { + in, out := &in.ProcessCount, &out.ProcessCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploadShardParamsObservation. +func (in *UploadShardParamsObservation) DeepCopy() *UploadShardParamsObservation { + if in == nil { + return nil + } + out := new(UploadShardParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploadShardParamsParameters) DeepCopyInto(out *UploadShardParamsParameters) { + *out = *in + if in.JobCount != nil { + in, out := &in.JobCount, &out.JobCount + *out = new(float64) + **out = **in + } + if in.ProcessCount != nil { + in, out := &in.ProcessCount, &out.ProcessCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploadShardParamsParameters. +func (in *UploadShardParamsParameters) DeepCopy() *UploadShardParamsParameters { + if in == nil { + return nil + } + out := new(UploadShardParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YcRuntimeInitParameters) DeepCopyInto(out *YcRuntimeInitParameters) { + *out = *in + if in.JobCount != nil { + in, out := &in.JobCount, &out.JobCount + *out = new(float64) + **out = **in + } + if in.UploadShardParams != nil { + in, out := &in.UploadShardParams, &out.UploadShardParams + *out = make([]UploadShardParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YcRuntimeInitParameters. +func (in *YcRuntimeInitParameters) DeepCopy() *YcRuntimeInitParameters { + if in == nil { + return nil + } + out := new(YcRuntimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
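A brief aside on the pattern repeated throughout this generated file: DeepCopyInto re-allocates every pointer, slice, and nested struct so the copy shares no memory with the receiver, and DeepCopy is the nil-safe wrapper around it. That contract is what lets controller code mutate a copy of a spec without corrupting the shared informer cache. A minimal usage sketch, assuming only the generated methods above; the field value is made up:

    package main

    import (
        "fmt"

        v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1"
    )

    func main() {
        yes := true
        orig := &v1alpha1.TskvParserParameters{AddRestColumn: &yes}
        cp := orig.DeepCopy()     // fresh allocations, no shared pointers
        *cp.AddRestColumn = false // mutate the copy...
        fmt.Println(*orig.AddRestColumn, *cp.AddRestColumn) // ...prints: true false
    }

Mutating the copy leaves the original untouched, which would not hold for a plain struct assignment: assignment copies pointer values, not the values they point to.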
+func (in *YcRuntimeObservation) DeepCopyInto(out *YcRuntimeObservation) { + *out = *in + if in.JobCount != nil { + in, out := &in.JobCount, &out.JobCount + *out = new(float64) + **out = **in + } + if in.UploadShardParams != nil { + in, out := &in.UploadShardParams, &out.UploadShardParams + *out = make([]UploadShardParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YcRuntimeObservation. +func (in *YcRuntimeObservation) DeepCopy() *YcRuntimeObservation { + if in == nil { + return nil + } + out := new(YcRuntimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YcRuntimeParameters) DeepCopyInto(out *YcRuntimeParameters) { + *out = *in + if in.JobCount != nil { + in, out := &in.JobCount, &out.JobCount + *out = new(float64) + **out = **in + } + if in.UploadShardParams != nil { + in, out := &in.UploadShardParams, &out.UploadShardParams + *out = make([]UploadShardParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YcRuntimeParameters. +func (in *YcRuntimeParameters) DeepCopy() *YcRuntimeParameters { + if in == nil { + return nil + } + out := new(YcRuntimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdbSourceInitParameters) DeepCopyInto(out *YdbSourceInitParameters) { + *out = *in + if in.ChangefeedCustomName != nil { + in, out := &in.ChangefeedCustomName, &out.ChangefeedCustomName + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(string) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SaKeyContentSecretRef != nil { + in, out := &in.SaKeyContentSecretRef, &out.SaKeyContentSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdbSourceInitParameters. +func (in *YdbSourceInitParameters) DeepCopy() *YdbSourceInitParameters { + if in == nil { + return nil + } + out := new(YdbSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YdbSourceObservation) DeepCopyInto(out *YdbSourceObservation) { + *out = *in + if in.ChangefeedCustomName != nil { + in, out := &in.ChangefeedCustomName, &out.ChangefeedCustomName + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(string) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdbSourceObservation. +func (in *YdbSourceObservation) DeepCopy() *YdbSourceObservation { + if in == nil { + return nil + } + out := new(YdbSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdbSourceParameters) DeepCopyInto(out *YdbSourceParameters) { + *out = *in + if in.ChangefeedCustomName != nil { + in, out := &in.ChangefeedCustomName, &out.ChangefeedCustomName + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(string) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SaKeyContentSecretRef != nil { + in, out := &in.SaKeyContentSecretRef, &out.SaKeyContentSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdbSourceParameters. +func (in *YdbSourceParameters) DeepCopy() *YdbSourceParameters { + if in == nil { + return nil + } + out := new(YdbSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
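One detail worth flagging in the YdbSource trio above: SaKeyContentSecretRef appears in the InitParameters and Parameters copies but not in the Observation one, matching upjet's convention that sensitive inputs enter through secret references in spec and are never echoed back into status (the YdbTarget types below show the same asymmetry). The selector itself is copied by plain dereference (**out = **in), which is safe because crossplane-runtime's v1.SecretKeySelector holds only string fields. A sketch of populating it; the secret name, namespace, and key are hypothetical placeholders:

    package main

    import (
        xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

        v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1"
    )

    func main() {
        _ = v1alpha1.YdbSourceParameters{
            SaKeyContentSecretRef: &xpv1.SecretKeySelector{
                // Name, namespace, and key below are illustrative only.
                SecretReference: xpv1.SecretReference{Name: "ydb-sa-key", Namespace: "crossplane-system"},
                Key:             "sa.json",
            },
        }
    }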
+func (in *YdbTargetInitParameters) DeepCopyInto(out *YdbTargetInitParameters) { + *out = *in + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.DefaultCompression != nil { + in, out := &in.DefaultCompression, &out.DefaultCompression + *out = new(string) + **out = **in + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(string) + **out = **in + } + if in.IsTableColumnOriented != nil { + in, out := &in.IsTableColumnOriented, &out.IsTableColumnOriented + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SaKeyContentSecretRef != nil { + in, out := &in.SaKeyContentSecretRef, &out.SaKeyContentSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdbTargetInitParameters. +func (in *YdbTargetInitParameters) DeepCopy() *YdbTargetInitParameters { + if in == nil { + return nil + } + out := new(YdbTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdbTargetObservation) DeepCopyInto(out *YdbTargetObservation) { + *out = *in + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.DefaultCompression != nil { + in, out := &in.DefaultCompression, &out.DefaultCompression + *out = new(string) + **out = **in + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(string) + **out = **in + } + if in.IsTableColumnOriented != nil { + in, out := &in.IsTableColumnOriented, &out.IsTableColumnOriented + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdbTargetObservation. +func (in *YdbTargetObservation) DeepCopy() *YdbTargetObservation { + if in == nil { + return nil + } + out := new(YdbTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *YdbTargetParameters) DeepCopyInto(out *YdbTargetParameters) { + *out = *in + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.DefaultCompression != nil { + in, out := &in.DefaultCompression, &out.DefaultCompression + *out = new(string) + **out = **in + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(string) + **out = **in + } + if in.IsTableColumnOriented != nil { + in, out := &in.IsTableColumnOriented, &out.IsTableColumnOriented + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SaKeyContentSecretRef != nil { + in, out := &in.SaKeyContentSecretRef, &out.SaKeyContentSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdbTargetParameters. +func (in *YdbTargetParameters) DeepCopy() *YdbTargetParameters { + if in == nil { + return nil + } + out := new(YdbTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsSourceInitParameters) DeepCopyInto(out *YdsSourceInitParameters) { + *out = *in + if in.AllowTTLRewind != nil { + in, out := &in.AllowTTLRewind, &out.AllowTTLRewind + *out = new(bool) + **out = **in + } + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = make([]YdsSourceParserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsSourceInitParameters. 
+func (in *YdsSourceInitParameters) DeepCopy() *YdsSourceInitParameters { + if in == nil { + return nil + } + out := new(YdsSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsSourceObservation) DeepCopyInto(out *YdsSourceObservation) { + *out = *in + if in.AllowTTLRewind != nil { + in, out := &in.AllowTTLRewind, &out.AllowTTLRewind + *out = new(bool) + **out = **in + } + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = make([]YdsSourceParserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsSourceObservation. +func (in *YdsSourceObservation) DeepCopy() *YdsSourceObservation { + if in == nil { + return nil + } + out := new(YdsSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YdsSourceParameters) DeepCopyInto(out *YdsSourceParameters) { + *out = *in + if in.AllowTTLRewind != nil { + in, out := &in.AllowTTLRewind, &out.AllowTTLRewind + *out = new(bool) + **out = **in + } + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Parser != nil { + in, out := &in.Parser, &out.Parser + *out = make([]YdsSourceParserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsSourceParameters. +func (in *YdsSourceParameters) DeepCopy() *YdsSourceParameters { + if in == nil { + return nil + } + out := new(YdsSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsSourceParserInitParameters) DeepCopyInto(out *YdsSourceParserInitParameters) { + *out = *in + if in.AuditTrailsV1Parser != nil { + in, out := &in.AuditTrailsV1Parser, &out.AuditTrailsV1Parser + *out = make([]ParserAuditTrailsV1ParserInitParameters, len(*in)) + copy(*out, *in) + } + if in.CloudLoggingParser != nil { + in, out := &in.CloudLoggingParser, &out.CloudLoggingParser + *out = make([]ParserCloudLoggingParserInitParameters, len(*in)) + copy(*out, *in) + } + if in.JSONParser != nil { + in, out := &in.JSONParser, &out.JSONParser + *out = make([]ParserJSONParserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TskvParser != nil { + in, out := &in.TskvParser, &out.TskvParser + *out = make([]ParserTskvParserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsSourceParserInitParameters. +func (in *YdsSourceParserInitParameters) DeepCopy() *YdsSourceParserInitParameters { + if in == nil { + return nil + } + out := new(YdsSourceParserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YdsSourceParserObservation) DeepCopyInto(out *YdsSourceParserObservation) { + *out = *in + if in.AuditTrailsV1Parser != nil { + in, out := &in.AuditTrailsV1Parser, &out.AuditTrailsV1Parser + *out = make([]ParserAuditTrailsV1ParserParameters, len(*in)) + copy(*out, *in) + } + if in.CloudLoggingParser != nil { + in, out := &in.CloudLoggingParser, &out.CloudLoggingParser + *out = make([]ParserCloudLoggingParserParameters, len(*in)) + copy(*out, *in) + } + if in.JSONParser != nil { + in, out := &in.JSONParser, &out.JSONParser + *out = make([]ParserJSONParserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TskvParser != nil { + in, out := &in.TskvParser, &out.TskvParser + *out = make([]ParserTskvParserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsSourceParserObservation. +func (in *YdsSourceParserObservation) DeepCopy() *YdsSourceParserObservation { + if in == nil { + return nil + } + out := new(YdsSourceParserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsSourceParserParameters) DeepCopyInto(out *YdsSourceParserParameters) { + *out = *in + if in.AuditTrailsV1Parser != nil { + in, out := &in.AuditTrailsV1Parser, &out.AuditTrailsV1Parser + *out = make([]ParserAuditTrailsV1ParserParameters, len(*in)) + copy(*out, *in) + } + if in.CloudLoggingParser != nil { + in, out := &in.CloudLoggingParser, &out.CloudLoggingParser + *out = make([]ParserCloudLoggingParserParameters, len(*in)) + copy(*out, *in) + } + if in.JSONParser != nil { + in, out := &in.JSONParser, &out.JSONParser + *out = make([]ParserJSONParserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TskvParser != nil { + in, out := &in.TskvParser, &out.TskvParser + *out = make([]ParserTskvParserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsSourceParserParameters. +func (in *YdsSourceParserParameters) DeepCopy() *YdsSourceParserParameters { + if in == nil { + return nil + } + out := new(YdsSourceParserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
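Note the two copying strategies sitting side by side in these parser types: element types with no pointer-bearing fields (the AuditTrailsV1 and CloudLogging parser structs) are duplicated with a single copy(*out, *in), because Go's value copy is already deep for them, while the JSON and TSKV parser slices need a per-element DeepCopyInto loop. A small self-contained illustration of why the distinction matters; Flat and Deep are hypothetical stand-ins, not provider types:

    package main

    import "fmt"

    type Flat struct{ N int }  // value-only fields: copy() is deep enough
    type Deep struct{ N *int } // pointer field: copy() would alias

    func main() {
        f := []Flat{{N: 1}}
        fc := make([]Flat, len(f))
        copy(fc, f)
        fc[0].N = 2 // independent: f[0].N stays 1

        n := 1
        d := []Deep{{N: &n}}
        dc := make([]Deep, len(d))
        copy(dc, d)  // shallow: dc[0].N and d[0].N point at the same int
        *dc[0].N = 2 // ...so this also changes *d[0].N
        fmt.Println(f[0].N, fc[0].N, *d[0].N) // prints: 1 2 2
    }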
+func (in *YdsTargetInitParameters) DeepCopyInto(out *YdsTargetInitParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.SaveTxOrder != nil { + in, out := &in.SaveTxOrder, &out.SaveTxOrder + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = make([]YdsTargetSerializerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsTargetInitParameters. +func (in *YdsTargetInitParameters) DeepCopy() *YdsTargetInitParameters { + if in == nil { + return nil + } + out := new(YdsTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsTargetObservation) DeepCopyInto(out *YdsTargetObservation) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.SaveTxOrder != nil { + in, out := &in.SaveTxOrder, &out.SaveTxOrder + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = make([]YdsTargetSerializerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsTargetObservation. +func (in *YdsTargetObservation) DeepCopy() *YdsTargetObservation { + if in == nil { + return nil + } + out := new(YdsTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YdsTargetParameters) DeepCopyInto(out *YdsTargetParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.SaveTxOrder != nil { + in, out := &in.SaveTxOrder, &out.SaveTxOrder + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = make([]YdsTargetSerializerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsTargetParameters. +func (in *YdsTargetParameters) DeepCopy() *YdsTargetParameters { + if in == nil { + return nil + } + out := new(YdsTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsTargetSerializerInitParameters) DeepCopyInto(out *YdsTargetSerializerInitParameters) { + *out = *in + if in.SerializerAuto != nil { + in, out := &in.SerializerAuto, &out.SerializerAuto + *out = make([]SerializerSerializerAutoInitParameters, len(*in)) + copy(*out, *in) + } + if in.SerializerDebezium != nil { + in, out := &in.SerializerDebezium, &out.SerializerDebezium + *out = make([]SerializerSerializerDebeziumInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SerializerJSON != nil { + in, out := &in.SerializerJSON, &out.SerializerJSON + *out = make([]SerializerSerializerJSONInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsTargetSerializerInitParameters. +func (in *YdsTargetSerializerInitParameters) DeepCopy() *YdsTargetSerializerInitParameters { + if in == nil { + return nil + } + out := new(YdsTargetSerializerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YdsTargetSerializerObservation) DeepCopyInto(out *YdsTargetSerializerObservation) { + *out = *in + if in.SerializerAuto != nil { + in, out := &in.SerializerAuto, &out.SerializerAuto + *out = make([]SerializerSerializerAutoParameters, len(*in)) + copy(*out, *in) + } + if in.SerializerDebezium != nil { + in, out := &in.SerializerDebezium, &out.SerializerDebezium + *out = make([]SerializerSerializerDebeziumObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SerializerJSON != nil { + in, out := &in.SerializerJSON, &out.SerializerJSON + *out = make([]SerializerSerializerJSONParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsTargetSerializerObservation. +func (in *YdsTargetSerializerObservation) DeepCopy() *YdsTargetSerializerObservation { + if in == nil { + return nil + } + out := new(YdsTargetSerializerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YdsTargetSerializerParameters) DeepCopyInto(out *YdsTargetSerializerParameters) { + *out = *in + if in.SerializerAuto != nil { + in, out := &in.SerializerAuto, &out.SerializerAuto + *out = make([]SerializerSerializerAutoParameters, len(*in)) + copy(*out, *in) + } + if in.SerializerDebezium != nil { + in, out := &in.SerializerDebezium, &out.SerializerDebezium + *out = make([]SerializerSerializerDebeziumParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SerializerJSON != nil { + in, out := &in.SerializerJSON, &out.SerializerJSON + *out = make([]SerializerSerializerJSONParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YdsTargetSerializerParameters. +func (in *YdsTargetSerializerParameters) DeepCopy() *YdsTargetSerializerParameters { + if in == nil { + return nil + } + out := new(YdsTargetSerializerParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/datatransfer/v1alpha1/zz_generated.resolvers.go b/apis/datatransfer/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..a5a16c3 --- /dev/null +++ b/apis/datatransfer/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,1268 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + common "github.com/tagesjump/provider-upjet-yc/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Endpoint. 
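The generated method that follows is the reference-resolution half of the Endpoint API: for each field that supports cross-resource references (folderId, plus the per-settings subnetId, mdbClusterId, securityGroups, database, and user fields), it resolves the companion *Ref/*Selector into a concrete value, using the referenced object's external name or, for databases and users, its spec name via common.ExtractSpecName. A hedged sketch of the call shape; the helper function and object name are hypothetical, and in the provider this call happens inside the managed-resource reconciler rather than in user code:

    import (
        "context"

        xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"

        v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1"
    )

    // resolveEndpointRefs shows the call shape only; production code never
    // needs to invoke ResolveReferences by hand.
    func resolveEndpointRefs(ctx context.Context, c client.Reader, ep *v1alpha1.Endpoint) error {
        // Point folderId at a Folder managed resource by its Kubernetes name.
        ep.Spec.ForProvider.FolderIDRef = &xpv1.Reference{Name: "example-folder"}
        // On success, spec.forProvider.folderId holds the Folder's external name.
        return ep.ResolveReferences(ctx, c)
    }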
+func (mg *Endpoint) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].ClickhouseSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].ClickhouseSource[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].ClickhouseSource[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].ClickhouseSource[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].ClickhouseSource[i4].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].ClickhouseSource[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].ClickhouseSource[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].ClickhouseTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].ClickhouseTarget[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].ClickhouseTarget[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].ClickhouseTarget[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].ClickhouseTarget[i4].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].ClickhouseTarget[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].ClickhouseTarget[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MongoSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MongodbClusterList{}, + Managed: 
&v1alpha12.MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID") + } + mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MongoSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MongoSource[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MongoSource[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MongoSource[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MongoSource[i4].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].MongoSource[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MongoSource[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MongoTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MongodbClusterList{}, + Managed: &v1alpha12.MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID") + } + mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MongoTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Settings[i3].MongoTarget[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MySQLClusterList{}, + Managed: &v1alpha12.MySQLCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterID") + } + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].DatabaseRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.MySQLDatabaseList{}, + Managed: &v1alpha12.MySQLDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Database") + } + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource); i4++ { + mrsp, err = 
r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].SecurityGroupsRefs, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].SecurityGroups") + } + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].UserRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.MySQLUserList{}, + Managed: &v1alpha12.MySQLUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].User") + } + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLSource[i4].UserRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MySQLClusterList{}, + Managed: &v1alpha12.MySQLCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterID") + } + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: 
mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].DatabaseRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.MySQLDatabaseList{}, + Managed: &v1alpha12.MySQLDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Database") + } + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].SecurityGroupsRefs, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].SecurityGroups") + } + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].MySQLTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].UserRef, + Selector: mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.MySQLUserList{}, + Managed: &v1alpha12.MySQLUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].User") + } + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].MySQLTarget[i4].UserRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource); i4++ { + for i5 := 0; i5 < 
len(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlClusterList{}, + Managed: &v1alpha12.PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterID") + } + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].DatabaseRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlDatabaseList{}, + Managed: &v1alpha12.PostgresqlDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Database") + } + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].SecurityGroups), + Extract: 
reference.ExternalName(), + References: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].SecurityGroupsRefs, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].SecurityGroups") + } + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].UserRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlUserList{}, + Managed: &v1alpha12.PostgresqlUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].User") + } + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresSource[i4].UserRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlClusterList{}, + Managed: &v1alpha12.PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterID") + } + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + 
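Aside: every security-group list in these Settings blocks resolves through the same ResolveMultiple shape; between loops only the field path under Settings[i3], the extractor, and the target kind vary. A minimal sketch of that shape, with a hypothetical src local standing in for one generated element such as mg.Spec.ForProvider.Settings[i3].PostgresSource[i4]:

// Sketch only: src is a hypothetical local; the field and type names mirror
// the surrounding generated code (v1alpha11 is the imported vpc API group).
mrsp, err := r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
	CurrentValues: reference.FromPtrValues(src.SecurityGroups), // []*string -> []string
	Extract:       reference.ExternalName(),                    // cloud-assigned IDs
	References:    src.SecurityGroupsRefs,
	Selector:      src.SecurityGroupsSelector,
	To: reference.To{
		List:    &v1alpha11.SecurityGroupList{},
		Managed: &v1alpha11.SecurityGroup{},
	},
})
if err != nil {
	return errors.Wrap(err, "src.SecurityGroups")
}
src.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) // resolved IDs back into the spec
src.SecurityGroupsRefs = mrsp.ResolvedReferences                // record which objects were matched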
Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].DatabaseRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlDatabaseList{}, + Managed: &v1alpha12.PostgresqlDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Database") + } + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].SecurityGroupsRefs, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].SecurityGroups") + } + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Settings[i3].PostgresTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].UserRef, + Selector: mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlUserList{}, + Managed: &v1alpha12.PostgresqlUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].User") + } + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Settings[i3].PostgresTarget[i4].UserRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: 
reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].ClickhouseSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].ClickhouseSource[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].ClickhouseSource[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].ClickhouseSource[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].ClickhouseSource[i4].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].ClickhouseSource[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].ClickhouseSource[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].ClickhouseTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].ClickhouseTarget[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].ClickhouseTarget[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].ClickhouseTarget[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].ClickhouseTarget[i4].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].ClickhouseTarget[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].ClickhouseTarget[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MongoSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MongodbClusterList{}, + Managed: &v1alpha12.MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID") + } + mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MongoSource[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + } + for 
i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MongoSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MongoSource[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MongoSource[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MongoSource[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MongoSource[i4].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].MongoSource[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MongoSource[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MongoTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MongodbClusterList{}, + Managed: &v1alpha12.MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID") + } + mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].Connection[i5].ConnectionOptions[i6].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MongoTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MongoTarget[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MySQLClusterList{}, + Managed: &v1alpha12.MySQLCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterID") + } + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].DatabaseRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.MySQLDatabaseList{}, + Managed: &v1alpha12.MySQLDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Database") + } + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].SecurityGroupsRefs, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].SecurityGroupsSelector, + To: reference.To{ 
+ List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].SecurityGroups") + } + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].UserRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.MySQLUserList{}, + Managed: &v1alpha12.MySQLUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].User") + } + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLSource[i4].UserRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.MySQLClusterList{}, + Managed: &v1alpha12.MySQLCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterID") + } + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].DatabaseRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.MySQLDatabaseList{}, + Managed: &v1alpha12.MySQLDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Database") + } + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].SecurityGroupsRefs, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].SecurityGroups") + } + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].MySQLTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].UserRef, + Selector: mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.MySQLUserList{}, + Managed: &v1alpha12.MySQLUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].User") + } + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].MySQLTarget[i4].UserRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterIDRef, + Selector: 
mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlClusterList{}, + Managed: &v1alpha12.PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterID") + } + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].DatabaseRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlDatabaseList{}, + Managed: &v1alpha12.PostgresqlDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Database") + } + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].SecurityGroupsRefs, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].SecurityGroups") + } + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresSource); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].UserRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlUserList{}, + Managed: &v1alpha12.PostgresqlUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].User") + } + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresSource[i4].UserRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterIDSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlClusterList{}, + Managed: &v1alpha12.PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterID") + } + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].MdbClusterIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection); i5++ { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise); i6++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetID") + } + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Connection[i5].OnPremise[i6].SubnetIDRef = rsp.ResolvedReference + + } + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Database), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].DatabaseRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].DatabaseSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlDatabaseList{}, + Managed: &v1alpha12.PostgresqlDatabase{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Database") + } + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].Database = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].DatabaseRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].SecurityGroupsRefs, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].SecurityGroupsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].SecurityGroups") + } + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].SecurityGroupsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Settings); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Settings[i3].PostgresTarget); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].User), + Extract: common.ExtractSpecName(), + Reference: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].UserRef, + Selector: mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].UserSelector, + To: reference.To{ + List: &v1alpha12.PostgresqlUserList{}, + Managed: &v1alpha12.PostgresqlUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].User") + } + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].User = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Settings[i3].PostgresTarget[i4].UserRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Transfer. 
+func (mg *Transfer) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.SourceIDRef,
+		Selector:     mg.Spec.ForProvider.SourceIDSelector,
+		To: reference.To{
+			List:    &EndpointList{},
+			Managed: &Endpoint{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.SourceID")
+	}
+	mg.Spec.ForProvider.SourceID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.SourceIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.TargetIDRef,
+		Selector:     mg.Spec.ForProvider.TargetIDSelector,
+		To: reference.To{
+			List:    &EndpointList{},
+			Managed: &Endpoint{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.TargetID")
+	}
+	mg.Spec.ForProvider.TargetID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.TargetIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.SourceIDRef,
+		Selector:     mg.Spec.InitProvider.SourceIDSelector,
+		To: reference.To{
+			List:    &EndpointList{},
+			Managed: &Endpoint{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.SourceID")
+	}
+	mg.Spec.InitProvider.SourceID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.SourceIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.TargetIDRef,
+		Selector:     mg.Spec.InitProvider.TargetIDSelector,
+		To: reference.To{
+			List:    &EndpointList{},
+			Managed: &Endpoint{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.TargetID")
+	}
+	mg.Spec.InitProvider.TargetID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.TargetIDRef = rsp.ResolvedReference
+
+	return nil
+}
diff --git a/apis/datatransfer/v1alpha1/zz_groupversion_info.go b/apis/datatransfer/v1alpha1/zz_groupversion_info.go
index 2c5af16..9bf7b3e 100755
--- a/apis/datatransfer/v1alpha1/zz_groupversion_info.go
+++ b/apis/datatransfer/v1alpha1/zz_groupversion_info.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 // +kubebuilder:object:generate=true
diff --git a/apis/datatransfer/v1alpha1/zz_transfer_terraformed.go b/apis/datatransfer/v1alpha1/zz_transfer_terraformed.go
index d0ded57..01a3183 100755
--- a/apis/datatransfer/v1alpha1/zz_transfer_terraformed.go
+++ b/apis/datatransfer/v1alpha1/zz_transfer_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this Transfer
 func (mg *Transfer) GetTerraformResourceType() string {
-return "yandex_datatransfer_transfer"
+	return "yandex_datatransfer_transfer"
 }
 
 // GetConnectionDetailsMapping for this Transfer
 func (tr *Transfer) GetConnectionDetailsMapping() map[string]string {
-return nil
+	return nil
 }
 
 // GetObservation of this Transfer
 func (tr *Transfer) GetObservation() (map[string]any, error) {
-o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-if err != nil {
-return nil, err
-}
-base := map[string]any{}
-return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this Transfer
 func (tr *Transfer) SetObservation(obs map[string]any) error {
-p, err := json.TFParser.Marshal(obs)
-if err != nil {
-return err
-}
-return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this Transfer
 func (tr *Transfer) GetID() string {
-if tr.Status.AtProvider.ID == nil {
-return ""
-}
-return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this Transfer
 func (tr *Transfer) GetParameters() (map[string]any, error) {
-p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-if err != nil {
-return nil, err
-}
-base := map[string]any{}
-return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this Transfer
 func (tr *Transfer) SetParameters(params map[string]any) error {
-p, err := json.TFParser.Marshal(params)
-if err != nil {
-return err
-}
-return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this Transfer
 func (tr *Transfer) GetInitParameters() (map[string]any, error) {
-p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-if err != nil {
-return nil, err
-}
-base := map[string]any{}
-return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
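A toy example of the merge behavior that GetMergedParameters (just below) relies on: forProvider values win, and initProvider only fills in what is missing, including inside slice elements. Illustrative only; the import path is assumed to be github.com/imdario/mergo, matching the mergo.Config usage in the generated code.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// params plays the role of spec.forProvider, initParams of spec.initProvider.
	params := map[string]any{"name": "from-forProvider"}
	initParams := map[string]any{"name": "from-initProvider", "folder_id": "b1gexample"}

	// WithSliceDeepCopy enables deep-copying slice elements but also flips
	// Overwrite to true, so the extra option resets it: existing params win.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	fmt.Println(params) // map[folder_id:b1gexample name:from-forProvider]
}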
 // GetMergedParameters of this Transfer
 func (tr *Transfer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-params, err := tr.GetParameters()
-if err != nil {
-return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-}
-if !shouldMergeInitProvider {
-return params, nil
-}
-
-initParams, err := tr.GetInitParameters()
-if err != nil {
-return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-}
-
-// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-// slices from the initProvider to forProvider. As it also sets
-// overwrite to true, we need to set it back to false, we don't
-// want to overwrite the forProvider fields with the initProvider
-// fields.
-err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-c.Overwrite = false
-})
-if err != nil {
-return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-}
-
-return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this Transfer using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *Transfer) LateInitialize(attrs []byte) (bool, error) {
-params := &TransferParameters{}
-if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-}
-opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-li := resource.NewGenericLateInitializer(opts...)
-return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &TransferParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *Transfer) GetTerraformSchemaVersion() int {
-return 1
+	return 1
 }
diff --git a/apis/datatransfer/v1alpha1/zz_transfer_types.go b/apis/datatransfer/v1alpha1/zz_transfer_types.go
index 87bc25c..9e1eaff 100755
--- a/apis/datatransfer/v1alpha1/zz_transfer_types.go
+++ b/apis/datatransfer/v1alpha1/zz_transfer_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
package v1alpha1 @@ -9,1213 +7,1036 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ColumnsInitParameters struct { + // (List of String) + ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` -// (List of String) -ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` - -// (List of String) -IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` + // (List of String) + IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` } - type ColumnsObservation struct { + // (List of String) + ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` -// (List of String) -ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` - -// (List of String) -IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` + // (List of String) + IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` } - type ColumnsParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` - -// (List of String) -// +kubebuilder:validation:Optional -IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` } - type ConvertToStringInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Columns []ColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Columns []ColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Tables []TablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Tables []TablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"` } - type ConvertToStringObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Columns []ColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Columns []ColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Tables []TablesObservation `json:"tables,omitempty" tf:"tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Tables []TablesObservation `json:"tables,omitempty" tf:"tables,omitempty"` } - type ConvertToStringParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Columns []ColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Columns []ColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Tables []TablesParameters `json:"tables,omitempty" tf:"tables,omitempty"` + // (Block List, Max: 1) (see 
below for nested schema) + // +kubebuilder:validation:Optional + Tables []TablesParameters `json:"tables,omitempty" tf:"tables,omitempty"` } - type FilterColumnsColumnsInitParameters struct { + // (List of String) + ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` -// (List of String) -ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` - -// (List of String) -IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` + // (List of String) + IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` } - type FilterColumnsColumnsObservation struct { + // (List of String) + ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` -// (List of String) -ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` - -// (List of String) -IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` + // (List of String) + IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` } - type FilterColumnsColumnsParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"` - -// (List of String) -// +kubebuilder:validation:Optional -IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"` } - type FilterColumnsInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Columns []FilterColumnsColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Columns []FilterColumnsColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Tables []FilterColumnsTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Tables []FilterColumnsTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"` } - type FilterColumnsObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Columns []FilterColumnsColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Columns []FilterColumnsColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Tables []FilterColumnsTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Tables []FilterColumnsTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"` } - type FilterColumnsParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Columns []FilterColumnsColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Columns []FilterColumnsColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) 
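Aside: every block in zz_transfer_types.go is rendered as the same triplet, so the diff above repeats one pattern many times: InitParameters (spec.initProvider), Observation (status.atProvider), and Parameters (spec.forProvider, carrying the CRD validation markers). The json tag names the CRD field, the tf tag the Terraform attribute. A compact illustration with a hypothetical ExampleBlock:

// Illustrative only: ExampleBlock is hypothetical; the shape and markers
// follow the generated structs above.
type ExampleBlockInitParameters struct {
	// (List of String)
	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
}

type ExampleBlockObservation struct {
	// (List of String)
	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
}

type ExampleBlockParameters struct {
	// (List of String)
	// +kubebuilder:validation:Optional
	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
}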
-// +kubebuilder:validation:Optional
-Tables []FilterColumnsTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Tables []FilterColumnsTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type FilterColumnsTablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type FilterColumnsTablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type FilterColumnsTablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type FilterRowsInitParameters struct {
+	// (String)
+	Filter *string `json:"filter,omitempty" tf:"filter,omitempty"`
-// (String)
-Filter *string `json:"filter,omitempty" tf:"filter,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Tables []FilterRowsTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []FilterRowsTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type FilterRowsObservation struct {
+	// (String)
+	Filter *string `json:"filter,omitempty" tf:"filter,omitempty"`
-// (String)
-Filter *string `json:"filter,omitempty" tf:"filter,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Tables []FilterRowsTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []FilterRowsTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type FilterRowsParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Filter *string `json:"filter,omitempty" tf:"filter,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Filter *string `json:"filter,omitempty" tf:"filter,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Tables []FilterRowsTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Tables []FilterRowsTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type FilterRowsTablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type FilterRowsTablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type FilterRowsTablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type FunctionInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-MaskFunctionHash []MaskFunctionHashInitParameters `json:"maskFunctionHash,omitempty" tf:"mask_function_hash,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	MaskFunctionHash []MaskFunctionHashInitParameters `json:"maskFunctionHash,omitempty" tf:"mask_function_hash,omitempty"`
 }
-
 type FunctionObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-MaskFunctionHash []MaskFunctionHashObservation `json:"maskFunctionHash,omitempty" tf:"mask_function_hash,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	MaskFunctionHash []MaskFunctionHashObservation `json:"maskFunctionHash,omitempty" tf:"mask_function_hash,omitempty"`
 }
-
 type FunctionParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-MaskFunctionHash []MaskFunctionHashParameters `json:"maskFunctionHash,omitempty" tf:"mask_function_hash,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	MaskFunctionHash []MaskFunctionHashParameters `json:"maskFunctionHash,omitempty" tf:"mask_function_hash,omitempty"`
 }
-
 type MaskFieldInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Function []FunctionInitParameters `json:"function,omitempty" tf:"function,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Function []FunctionInitParameters `json:"function,omitempty" tf:"function,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Tables []MaskFieldTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []MaskFieldTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type MaskFieldObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Function []FunctionObservation `json:"function,omitempty" tf:"function,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Function []FunctionObservation `json:"function,omitempty" tf:"function,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Tables []MaskFieldTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []MaskFieldTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type MaskFieldParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Function []FunctionParameters `json:"function,omitempty" tf:"function,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Function []FunctionParameters `json:"function,omitempty" tf:"function,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Tables []MaskFieldTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Tables []MaskFieldTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type MaskFieldTablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type MaskFieldTablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type MaskFieldTablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type MaskFunctionHashInitParameters struct {
-
-// (String)
-UserDefinedSalt *string `json:"userDefinedSalt,omitempty" tf:"user_defined_salt,omitempty"`
+	// (String)
+	UserDefinedSalt *string `json:"userDefinedSalt,omitempty" tf:"user_defined_salt,omitempty"`
 }
-
 type MaskFunctionHashObservation struct {
-
-// (String)
-UserDefinedSalt *string `json:"userDefinedSalt,omitempty" tf:"user_defined_salt,omitempty"`
+	// (String)
+	UserDefinedSalt *string `json:"userDefinedSalt,omitempty" tf:"user_defined_salt,omitempty"`
 }
-
 type MaskFunctionHashParameters struct {
-
-// (String)
-// +kubebuilder:validation:Optional
-UserDefinedSalt *string `json:"userDefinedSalt,omitempty" tf:"user_defined_salt,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	UserDefinedSalt *string `json:"userDefinedSalt,omitempty" tf:"user_defined_salt,omitempty"`
 }
-
 type NewNameInitParameters struct {
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (String)
-NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
+	// (String)
+	NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
 }
-
 type NewNameObservation struct {
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (String)
-NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
+	// (String)
+	NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
 }
-
 type NewNameParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
 }
-
 type OriginalNameInitParameters struct {
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (String)
-NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
+	// (String)
+	NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
 }
-
 type OriginalNameObservation struct {
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (String)
-NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
+	// (String)
+	NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
 }
-
 type OriginalNameParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	NameSpace *string `json:"nameSpace,omitempty" tf:"name_space,omitempty"`
 }
-
 type RenameTablesInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-RenameTables []RenameTablesRenameTablesInitParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	RenameTables []RenameTablesRenameTablesInitParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
 }
-
 type RenameTablesObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-RenameTables []RenameTablesRenameTablesObservation `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	RenameTables []RenameTablesRenameTablesObservation `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
 }
-
 type RenameTablesParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-RenameTables []RenameTablesRenameTablesParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	RenameTables []RenameTablesRenameTablesParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
 }
-
 type RenameTablesRenameTablesInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	NewName []NewNameInitParameters `json:"newName,omitempty" tf:"new_name,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-NewName []NewNameInitParameters `json:"newName,omitempty" tf:"new_name,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OriginalName []OriginalNameInitParameters `json:"originalName,omitempty" tf:"original_name,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OriginalName []OriginalNameInitParameters `json:"originalName,omitempty" tf:"original_name,omitempty"`
 }
-
 type RenameTablesRenameTablesObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	NewName []NewNameObservation `json:"newName,omitempty" tf:"new_name,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-NewName []NewNameObservation `json:"newName,omitempty" tf:"new_name,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-OriginalName []OriginalNameObservation `json:"originalName,omitempty" tf:"original_name,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	OriginalName []OriginalNameObservation `json:"originalName,omitempty" tf:"original_name,omitempty"`
 }
-
 type RenameTablesRenameTablesParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	NewName []NewNameParameters `json:"newName,omitempty" tf:"new_name,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-NewName []NewNameParameters `json:"newName,omitempty" tf:"new_name,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-OriginalName []OriginalNameParameters `json:"originalName,omitempty" tf:"original_name,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	OriginalName []OriginalNameParameters `json:"originalName,omitempty" tf:"original_name,omitempty"`
 }
-
 type ReplacePrimaryKeyInitParameters struct {
+	// (List of String)
+	Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"`
-// (List of String)
-Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Tables []ReplacePrimaryKeyTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []ReplacePrimaryKeyTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type ReplacePrimaryKeyObservation struct {
+	// (List of String)
+	Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"`
-// (List of String)
-Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Tables []ReplacePrimaryKeyTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []ReplacePrimaryKeyTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type ReplacePrimaryKeyParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Tables []ReplacePrimaryKeyTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Tables []ReplacePrimaryKeyTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type ReplacePrimaryKeyTablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type ReplacePrimaryKeyTablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type ReplacePrimaryKeyTablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type RuntimeInitParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-YcRuntime []YcRuntimeInitParameters `json:"ycRuntime,omitempty" tf:"yc_runtime,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	YcRuntime []YcRuntimeInitParameters `json:"ycRuntime,omitempty" tf:"yc_runtime,omitempty"`
 }
-
 type RuntimeObservation struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-YcRuntime []YcRuntimeObservation `json:"ycRuntime,omitempty" tf:"yc_runtime,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	YcRuntime []YcRuntimeObservation `json:"ycRuntime,omitempty" tf:"yc_runtime,omitempty"`
 }
-
 type RuntimeParameters struct {
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-YcRuntime []YcRuntimeParameters `json:"ycRuntime,omitempty" tf:"yc_runtime,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	YcRuntime []YcRuntimeParameters `json:"ycRuntime,omitempty" tf:"yc_runtime,omitempty"`
 }
-
 type SharderTransformerColumnsInitParameters struct {
+	// (List of String)
+	ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"`
-// (List of String)
-ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"`
-
-// (List of String)
-IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"`
+	// (List of String)
+	IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"`
 }
-
 type SharderTransformerColumnsObservation struct {
+	// (List of String)
+	ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"`
-// (List of String)
-ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"`
-
-// (List of String)
-IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"`
+	// (List of String)
+	IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"`
 }
-
 type SharderTransformerColumnsParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeColumns []*string `json:"excludeColumns,omitempty" tf:"exclude_columns,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeColumns []*string `json:"includeColumns,omitempty" tf:"include_columns,omitempty"`
 }
-
 type SharderTransformerInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Columns []SharderTransformerColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Columns []SharderTransformerColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"`
-
-// (Number)
-ShardsCount *float64 `json:"shardsCount,omitempty" tf:"shards_count,omitempty"`
+	// (Number)
+	ShardsCount *float64 `json:"shardsCount,omitempty" tf:"shards_count,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Tables []SharderTransformerTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []SharderTransformerTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type SharderTransformerObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Columns []SharderTransformerColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Columns []SharderTransformerColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"`
+	// (Number)
+	ShardsCount *float64 `json:"shardsCount,omitempty" tf:"shards_count,omitempty"`
-// (Number)
-ShardsCount *float64 `json:"shardsCount,omitempty" tf:"shards_count,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-Tables []SharderTransformerTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []SharderTransformerTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type SharderTransformerParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Columns []SharderTransformerColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Columns []SharderTransformerColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	ShardsCount *float64 `json:"shardsCount,omitempty" tf:"shards_count,omitempty"`
-// (Number)
-// +kubebuilder:validation:Optional
-ShardsCount *float64 `json:"shardsCount,omitempty" tf:"shards_count,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Tables []SharderTransformerTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Tables []SharderTransformerTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type SharderTransformerTablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type SharderTransformerTablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type SharderTransformerTablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TableSplitterTransformerInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-
-// (String)
-Splitter *string `json:"splitter,omitempty" tf:"splitter,omitempty"`
+	// (String)
+	Splitter *string `json:"splitter,omitempty" tf:"splitter,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Tables []TableSplitterTransformerTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []TableSplitterTransformerTablesInitParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type TableSplitterTransformerObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-
-// (String)
-Splitter *string `json:"splitter,omitempty" tf:"splitter,omitempty"`
+	// (String)
+	Splitter *string `json:"splitter,omitempty" tf:"splitter,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Tables []TableSplitterTransformerTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Tables []TableSplitterTransformerTablesObservation `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type TableSplitterTransformerParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-Splitter *string `json:"splitter,omitempty" tf:"splitter,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Splitter *string `json:"splitter,omitempty" tf:"splitter,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Tables []TableSplitterTransformerTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Tables []TableSplitterTransformerTablesParameters `json:"tables,omitempty" tf:"tables,omitempty"`
 }
-
 type TableSplitterTransformerTablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TableSplitterTransformerTablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TableSplitterTransformerTablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TablesInitParameters struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TablesObservation struct {
+	// (List of String)
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TablesParameters struct {
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-// (List of String)
-// +kubebuilder:validation:Optional
-ExcludeTables []*string `json:"excludeTables,omitempty" tf:"exclude_tables,omitempty"`
-
-// (List of String)
-// +kubebuilder:validation:Optional
-IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
+	// (List of String)
+	// +kubebuilder:validation:Optional
+	IncludeTables []*string `json:"includeTables,omitempty" tf:"include_tables,omitempty"`
 }
-
 type TransferInitParameters struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// (Map of String)
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// (Map of String)
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	OnCreateActivateMode *string `json:"onCreateActivateMode,omitempty" tf:"on_create_activate_mode,omitempty"`
-// (String)
-OnCreateActivateMode *string `json:"onCreateActivateMode,omitempty" tf:"on_create_activate_mode,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Runtime []RuntimeInitParameters `json:"runtime,omitempty" tf:"runtime,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Runtime []RuntimeInitParameters `json:"runtime,omitempty" tf:"runtime,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=Endpoint
+	SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=Endpoint
-SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"`
+	// Reference to a Endpoint to populate sourceId.
+	// +kubebuilder:validation:Optional
+	SourceIDRef *v1.Reference `json:"sourceIdRef,omitempty" tf:"-"`
-// Reference to a Endpoint to populate sourceId.
-// +kubebuilder:validation:Optional
-SourceIDRef *v1.Reference `json:"sourceIdRef,omitempty" tf:"-"`
+	// Selector for a Endpoint to populate sourceId.
+	// +kubebuilder:validation:Optional
+	SourceIDSelector *v1.Selector `json:"sourceIdSelector,omitempty" tf:"-"`
-// Selector for a Endpoint to populate sourceId.
-// +kubebuilder:validation:Optional
-SourceIDSelector *v1.Selector `json:"sourceIdSelector,omitempty" tf:"-"`
+	// (String)
+	// +crossplane:generate:reference:type=Endpoint
+	TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=Endpoint
-TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"`
+	// Reference to a Endpoint to populate targetId.
+	// +kubebuilder:validation:Optional
+	TargetIDRef *v1.Reference `json:"targetIdRef,omitempty" tf:"-"`
-// Reference to a Endpoint to populate targetId.
-// +kubebuilder:validation:Optional
-TargetIDRef *v1.Reference `json:"targetIdRef,omitempty" tf:"-"`
+	// Selector for a Endpoint to populate targetId.
+	// +kubebuilder:validation:Optional
+	TargetIDSelector *v1.Selector `json:"targetIdSelector,omitempty" tf:"-"`
-// Selector for a Endpoint to populate targetId.
-// +kubebuilder:validation:Optional
-TargetIDSelector *v1.Selector `json:"targetIdSelector,omitempty" tf:"-"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Transformation []TransformationInitParameters `json:"transformation,omitempty" tf:"transformation,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Transformation []TransformationInitParameters `json:"transformation,omitempty" tf:"transformation,omitempty"`
-
-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 type TransferObservation struct {
+	// (String)
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// (String)
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// (String)
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// (String)
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// (String) The ID of this resource.
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// (String) The ID of this resource.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-// (Map of String)
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// (Map of String)
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// (String)
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-OnCreateActivateMode *string `json:"onCreateActivateMode,omitempty" tf:"on_create_activate_mode,omitempty"`
+	// (String)
+	OnCreateActivateMode *string `json:"onCreateActivateMode,omitempty" tf:"on_create_activate_mode,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Runtime []RuntimeObservation `json:"runtime,omitempty" tf:"runtime,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Runtime []RuntimeObservation `json:"runtime,omitempty" tf:"runtime,omitempty"`
-// (String)
-SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"`
+	// (String)
+	SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"`
-// (String)
-TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"`
+	// (String)
+	TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-Transformation []TransformationObservation `json:"transformation,omitempty" tf:"transformation,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	Transformation []TransformationObservation `json:"transformation,omitempty" tf:"transformation,omitempty"`
-// (String)
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// (String)
-Warning *string `json:"warning,omitempty" tf:"warning,omitempty"`
+	// (String)
+	Warning *string `json:"warning,omitempty" tf:"warning,omitempty"`
 }
-
 type TransferParameters struct {
+	// (String)
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// (String)
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// (Map of String)
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// (Map of String)
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-OnCreateActivateMode *string `json:"onCreateActivateMode,omitempty" tf:"on_create_activate_mode,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	OnCreateActivateMode *string `json:"onCreateActivateMode,omitempty" tf:"on_create_activate_mode,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Runtime []RuntimeParameters `json:"runtime,omitempty" tf:"runtime,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Runtime []RuntimeParameters `json:"runtime,omitempty" tf:"runtime,omitempty"`
-// (String)
-// +crossplane:generate:reference:type=Endpoint
-// +kubebuilder:validation:Optional
-SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=Endpoint
+	// +kubebuilder:validation:Optional
+	SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"`
-// Reference to a Endpoint to populate sourceId.
-// +kubebuilder:validation:Optional
-SourceIDRef *v1.Reference `json:"sourceIdRef,omitempty" tf:"-"`
+	// Reference to a Endpoint to populate sourceId.
+	// +kubebuilder:validation:Optional
+	SourceIDRef *v1.Reference `json:"sourceIdRef,omitempty" tf:"-"`
-// Selector for a Endpoint to populate sourceId.
-// +kubebuilder:validation:Optional
-SourceIDSelector *v1.Selector `json:"sourceIdSelector,omitempty" tf:"-"`
+	// Selector for a Endpoint to populate sourceId.
+	// +kubebuilder:validation:Optional
+	SourceIDSelector *v1.Selector `json:"sourceIdSelector,omitempty" tf:"-"`
-// (String)
-// +crossplane:generate:reference:type=Endpoint
-// +kubebuilder:validation:Optional
-TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"`
+	// (String)
+	// +crossplane:generate:reference:type=Endpoint
+	// +kubebuilder:validation:Optional
+	TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"`
-// Reference to a Endpoint to populate targetId.
-// +kubebuilder:validation:Optional
-TargetIDRef *v1.Reference `json:"targetIdRef,omitempty" tf:"-"`
+	// Reference to a Endpoint to populate targetId.
+	// +kubebuilder:validation:Optional
+	TargetIDRef *v1.Reference `json:"targetIdRef,omitempty" tf:"-"`
-// Selector for a Endpoint to populate targetId.
-// +kubebuilder:validation:Optional
-TargetIDSelector *v1.Selector `json:"targetIdSelector,omitempty" tf:"-"`
+	// Selector for a Endpoint to populate targetId.
+	// +kubebuilder:validation:Optional
+	TargetIDSelector *v1.Selector `json:"targetIdSelector,omitempty" tf:"-"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Transformation []TransformationParameters `json:"transformation,omitempty" tf:"transformation,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Transformation []TransformationParameters `json:"transformation,omitempty" tf:"transformation,omitempty"`
-// (String)
-// +kubebuilder:validation:Optional
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 type TransformationInitParameters struct {
-
-// (Block List) (see below for nested schema)
-Transformers []TransformersInitParameters `json:"transformers,omitempty" tf:"transformers,omitempty"`
+	// (Block List) (see below for nested schema)
+	Transformers []TransformersInitParameters `json:"transformers,omitempty" tf:"transformers,omitempty"`
 }
-
 type TransformationObservation struct {
-
-// (Block List) (see below for nested schema)
-Transformers []TransformersObservation `json:"transformers,omitempty" tf:"transformers,omitempty"`
+	// (Block List) (see below for nested schema)
+	Transformers []TransformersObservation `json:"transformers,omitempty" tf:"transformers,omitempty"`
 }
-
 type TransformationParameters struct {
-
-// (Block List) (see below for nested schema)
-// +kubebuilder:validation:Optional
-Transformers []TransformersParameters `json:"transformers,omitempty" tf:"transformers,omitempty"`
+	// (Block List) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	Transformers []TransformersParameters `json:"transformers,omitempty" tf:"transformers,omitempty"`
 }
-
 type TransformersInitParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	ConvertToString []ConvertToStringInitParameters `json:"convertToString,omitempty" tf:"convert_to_string,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-ConvertToString []ConvertToStringInitParameters `json:"convertToString,omitempty" tf:"convert_to_string,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-FilterColumns []FilterColumnsInitParameters `json:"filterColumns,omitempty" tf:"filter_columns,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	FilterColumns []FilterColumnsInitParameters `json:"filterColumns,omitempty" tf:"filter_columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-FilterRows []FilterRowsInitParameters `json:"filterRows,omitempty" tf:"filter_rows,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	FilterRows []FilterRowsInitParameters `json:"filterRows,omitempty" tf:"filter_rows,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-MaskField []MaskFieldInitParameters `json:"maskField,omitempty" tf:"mask_field,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	MaskField []MaskFieldInitParameters `json:"maskField,omitempty" tf:"mask_field,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-RenameTables []RenameTablesInitParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	RenameTables []RenameTablesInitParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-ReplacePrimaryKey []ReplacePrimaryKeyInitParameters `json:"replacePrimaryKey,omitempty" tf:"replace_primary_key,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ReplacePrimaryKey []ReplacePrimaryKeyInitParameters `json:"replacePrimaryKey,omitempty" tf:"replace_primary_key,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-SharderTransformer []SharderTransformerInitParameters `json:"sharderTransformer,omitempty" tf:"sharder_transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	SharderTransformer []SharderTransformerInitParameters `json:"sharderTransformer,omitempty" tf:"sharder_transformer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-TableSplitterTransformer []TableSplitterTransformerInitParameters `json:"tableSplitterTransformer,omitempty" tf:"table_splitter_transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TableSplitterTransformer []TableSplitterTransformerInitParameters `json:"tableSplitterTransformer,omitempty" tf:"table_splitter_transformer,omitempty"`
 }
-
 type TransformersObservation struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	ConvertToString []ConvertToStringObservation `json:"convertToString,omitempty" tf:"convert_to_string,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-ConvertToString []ConvertToStringObservation `json:"convertToString,omitempty" tf:"convert_to_string,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	FilterColumns []FilterColumnsObservation `json:"filterColumns,omitempty" tf:"filter_columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-FilterColumns []FilterColumnsObservation `json:"filterColumns,omitempty" tf:"filter_columns,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	FilterRows []FilterRowsObservation `json:"filterRows,omitempty" tf:"filter_rows,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-FilterRows []FilterRowsObservation `json:"filterRows,omitempty" tf:"filter_rows,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	MaskField []MaskFieldObservation `json:"maskField,omitempty" tf:"mask_field,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-MaskField []MaskFieldObservation `json:"maskField,omitempty" tf:"mask_field,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	RenameTables []RenameTablesObservation `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-RenameTables []RenameTablesObservation `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	ReplacePrimaryKey []ReplacePrimaryKeyObservation `json:"replacePrimaryKey,omitempty" tf:"replace_primary_key,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-ReplacePrimaryKey []ReplacePrimaryKeyObservation `json:"replacePrimaryKey,omitempty" tf:"replace_primary_key,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	SharderTransformer []SharderTransformerObservation `json:"sharderTransformer,omitempty" tf:"sharder_transformer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-SharderTransformer []SharderTransformerObservation `json:"sharderTransformer,omitempty" tf:"sharder_transformer,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-TableSplitterTransformer []TableSplitterTransformerObservation `json:"tableSplitterTransformer,omitempty" tf:"table_splitter_transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	TableSplitterTransformer []TableSplitterTransformerObservation `json:"tableSplitterTransformer,omitempty" tf:"table_splitter_transformer,omitempty"`
 }
-
 type TransformersParameters struct {
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ConvertToString []ConvertToStringParameters `json:"convertToString,omitempty" tf:"convert_to_string,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ConvertToString []ConvertToStringParameters `json:"convertToString,omitempty" tf:"convert_to_string,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-FilterColumns []FilterColumnsParameters `json:"filterColumns,omitempty" tf:"filter_columns,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	FilterColumns []FilterColumnsParameters `json:"filterColumns,omitempty" tf:"filter_columns,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-FilterRows []FilterRowsParameters `json:"filterRows,omitempty" tf:"filter_rows,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	FilterRows []FilterRowsParameters `json:"filterRows,omitempty" tf:"filter_rows,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-MaskField []MaskFieldParameters `json:"maskField,omitempty" tf:"mask_field,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	MaskField []MaskFieldParameters `json:"maskField,omitempty" tf:"mask_field,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-RenameTables []RenameTablesParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	RenameTables []RenameTablesParameters `json:"renameTables,omitempty" tf:"rename_tables,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-ReplacePrimaryKey []ReplacePrimaryKeyParameters `json:"replacePrimaryKey,omitempty" tf:"replace_primary_key,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	ReplacePrimaryKey []ReplacePrimaryKeyParameters `json:"replacePrimaryKey,omitempty" tf:"replace_primary_key,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-SharderTransformer []SharderTransformerParameters `json:"sharderTransformer,omitempty" tf:"sharder_transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	SharderTransformer []SharderTransformerParameters `json:"sharderTransformer,omitempty" tf:"sharder_transformer,omitempty"`
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-TableSplitterTransformer []TableSplitterTransformerParameters `json:"tableSplitterTransformer,omitempty" tf:"table_splitter_transformer,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	TableSplitterTransformer []TableSplitterTransformerParameters `json:"tableSplitterTransformer,omitempty" tf:"table_splitter_transformer,omitempty"`
 }
-
 type UploadShardParamsInitParameters struct {
+	// (Number)
+	JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-// (Number)
-JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-
-// (Number)
-ProcessCount *float64 `json:"processCount,omitempty" tf:"process_count,omitempty"`
+	// (Number)
+	ProcessCount *float64 `json:"processCount,omitempty" tf:"process_count,omitempty"`
 }
-
 type UploadShardParamsObservation struct {
+	// (Number)
+	JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-// (Number)
-JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-
-// (Number)
-ProcessCount *float64 `json:"processCount,omitempty" tf:"process_count,omitempty"`
+	// (Number)
+	ProcessCount *float64 `json:"processCount,omitempty" tf:"process_count,omitempty"`
 }
-
 type UploadShardParamsParameters struct {
+	// (Number)
+	// +kubebuilder:validation:Optional
+	JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-// (Number)
-// +kubebuilder:validation:Optional
-JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-
-// (Number)
-// +kubebuilder:validation:Optional
-ProcessCount *float64 `json:"processCount,omitempty" tf:"process_count,omitempty"`
+	// (Number)
+	// +kubebuilder:validation:Optional
+	ProcessCount *float64 `json:"processCount,omitempty" tf:"process_count,omitempty"`
 }
-
 type YcRuntimeInitParameters struct {
+	// (Number)
+	JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-// (Number)
-JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-UploadShardParams []UploadShardParamsInitParameters `json:"uploadShardParams,omitempty" tf:"upload_shard_params,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	UploadShardParams []UploadShardParamsInitParameters `json:"uploadShardParams,omitempty" tf:"upload_shard_params,omitempty"`
 }
-
 type YcRuntimeObservation struct {
+	// (Number)
+	JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-// (Number)
-JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-UploadShardParams []UploadShardParamsObservation `json:"uploadShardParams,omitempty" tf:"upload_shard_params,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	UploadShardParams []UploadShardParamsObservation `json:"uploadShardParams,omitempty" tf:"upload_shard_params,omitempty"`
 }
-
 type YcRuntimeParameters struct {
+	// (Number)
+	// +kubebuilder:validation:Optional
+	JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-// (Number)
-// +kubebuilder:validation:Optional
-JobCount *float64 `json:"jobCount,omitempty" tf:"job_count,omitempty"`
-
-// (Block List, Max: 1) (see below for nested schema)
-// +kubebuilder:validation:Optional
-UploadShardParams []UploadShardParamsParameters `json:"uploadShardParams,omitempty" tf:"upload_shard_params,omitempty"`
+	// (Block List, Max: 1) (see below for nested schema)
+	// +kubebuilder:validation:Optional
+	UploadShardParams []UploadShardParamsParameters `json:"uploadShardParams,omitempty" tf:"upload_shard_params,omitempty"`
 }

 // TransferSpec defines the desired state of Transfer
 type TransferSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider TransferParameters `json:"forProvider"`
+	ForProvider TransferParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -1226,21 +1047,20 @@ type TransferSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider TransferInitParameters `json:"initProvider,omitempty"`
+	InitProvider TransferInitParameters `json:"initProvider,omitempty"`
 }

 // TransferStatus defines the observed state of Transfer.
 type TransferStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider TransferObservation `json:"atProvider,omitempty"`
+	AtProvider TransferObservation `json:"atProvider,omitempty"`
 }

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
-// Transfer is the Schema for the Transfers API.
+// Transfer is the Schema for the Transfers API.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
 // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
diff --git a/apis/dns/v1alpha1/zz_generated.conversion_hubs.go b/apis/dns/v1alpha1/zz_generated.conversion_hubs.go
index c4577dc..4c5409c 100755
--- a/apis/dns/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/dns/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,16 +1,12 @@
-
-
 // Code generated by upjet. DO NOT EDIT.

 package v1alpha1

+// Hub marks this type as a conversion hub.
+func (tr *Recordset) Hub() {}
-	// Hub marks this type as a conversion hub.
-	func (tr *Recordset) Hub() {}
-
-	// Hub marks this type as a conversion hub.
-	func (tr *Zone) Hub() {}
-
-	// Hub marks this type as a conversion hub.
-	func (tr *ZoneIAMBinding) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *Zone) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ZoneIAMBinding) Hub() {}
diff --git a/apis/dns/v1alpha1/zz_generated.deepcopy.go b/apis/dns/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..66b2aa4
--- /dev/null
+++ b/apis/dns/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,873 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Recordset) DeepCopyInto(out *Recordset) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Recordset.
+func (in *Recordset) DeepCopy() *Recordset { + if in == nil { + return nil + } + out := new(Recordset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Recordset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordsetInitParameters) DeepCopyInto(out *RecordsetInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } + if in.ZoneIDRef != nil { + in, out := &in.ZoneIDRef, &out.ZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneIDSelector != nil { + in, out := &in.ZoneIDSelector, &out.ZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordsetInitParameters. +func (in *RecordsetInitParameters) DeepCopy() *RecordsetInitParameters { + if in == nil { + return nil + } + out := new(RecordsetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordsetList) DeepCopyInto(out *RecordsetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Recordset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordsetList. +func (in *RecordsetList) DeepCopy() *RecordsetList { + if in == nil { + return nil + } + out := new(RecordsetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RecordsetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordsetObservation) DeepCopyInto(out *RecordsetObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordsetObservation. +func (in *RecordsetObservation) DeepCopy() *RecordsetObservation { + if in == nil { + return nil + } + out := new(RecordsetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordsetParameters) DeepCopyInto(out *RecordsetParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } + if in.ZoneIDRef != nil { + in, out := &in.ZoneIDRef, &out.ZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneIDSelector != nil { + in, out := &in.ZoneIDSelector, &out.ZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordsetParameters. +func (in *RecordsetParameters) DeepCopy() *RecordsetParameters { + if in == nil { + return nil + } + out := new(RecordsetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordsetSpec) DeepCopyInto(out *RecordsetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordsetSpec. +func (in *RecordsetSpec) DeepCopy() *RecordsetSpec { + if in == nil { + return nil + } + out := new(RecordsetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordsetStatus) DeepCopyInto(out *RecordsetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordsetStatus. 
+func (in *RecordsetStatus) DeepCopy() *RecordsetStatus { + if in == nil { + return nil + } + out := new(RecordsetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Zone) DeepCopyInto(out *Zone) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zone. +func (in *Zone) DeepCopy() *Zone { + if in == nil { + return nil + } + out := new(Zone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Zone) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneIAMBinding) DeepCopyInto(out *ZoneIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBinding. +func (in *ZoneIAMBinding) DeepCopy() *ZoneIAMBinding { + if in == nil { + return nil + } + out := new(ZoneIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZoneIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneIAMBindingInitParameters) DeepCopyInto(out *ZoneIAMBindingInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.DNSZoneIDRef != nil { + in, out := &in.DNSZoneIDRef, &out.DNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DNSZoneIDSelector != nil { + in, out := &in.DNSZoneIDSelector, &out.DNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBindingInitParameters. +func (in *ZoneIAMBindingInitParameters) DeepCopy() *ZoneIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(ZoneIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZoneIAMBindingList) DeepCopyInto(out *ZoneIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ZoneIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBindingList. +func (in *ZoneIAMBindingList) DeepCopy() *ZoneIAMBindingList { + if in == nil { + return nil + } + out := new(ZoneIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZoneIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneIAMBindingObservation) DeepCopyInto(out *ZoneIAMBindingObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBindingObservation. +func (in *ZoneIAMBindingObservation) DeepCopy() *ZoneIAMBindingObservation { + if in == nil { + return nil + } + out := new(ZoneIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneIAMBindingParameters) DeepCopyInto(out *ZoneIAMBindingParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.DNSZoneIDRef != nil { + in, out := &in.DNSZoneIDRef, &out.DNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DNSZoneIDSelector != nil { + in, out := &in.DNSZoneIDSelector, &out.DNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBindingParameters. +func (in *ZoneIAMBindingParameters) DeepCopy() *ZoneIAMBindingParameters { + if in == nil { + return nil + } + out := new(ZoneIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZoneIAMBindingSpec) DeepCopyInto(out *ZoneIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBindingSpec. +func (in *ZoneIAMBindingSpec) DeepCopy() *ZoneIAMBindingSpec { + if in == nil { + return nil + } + out := new(ZoneIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneIAMBindingStatus) DeepCopyInto(out *ZoneIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneIAMBindingStatus. +func (in *ZoneIAMBindingStatus) DeepCopy() *ZoneIAMBindingStatus { + if in == nil { + return nil + } + out := new(ZoneIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneInitParameters) DeepCopyInto(out *ZoneInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateNetworks != nil { + in, out := &in.PrivateNetworks, &out.PrivateNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateNetworksRefs != nil { + in, out := &in.PrivateNetworksRefs, &out.PrivateNetworksRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateNetworksSelector != nil { + in, out := &in.PrivateNetworksSelector, &out.PrivateNetworksSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Public != nil { + in, out := &in.Public, &out.Public + *out = new(bool) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneInitParameters. 
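// Editor's note: the Labels loop in ZoneInitParameters.DeepCopyInto above
// allocates a fresh *string per map entry, so the copy shares no pointers
// with the source. A hand-written equivalent of that pattern, shown here
// only for illustration:

func exampleCopyLabels(in map[string]*string) map[string]*string {
	out := make(map[string]*string, len(in))
	for k, v := range in {
		if v == nil {
			out[k] = nil
			continue
		}
		s := *v // copy the pointee, then store a new address
		out[k] = &s
	}
	return out
}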
+func (in *ZoneInitParameters) DeepCopy() *ZoneInitParameters { + if in == nil { + return nil + } + out := new(ZoneInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneList) DeepCopyInto(out *ZoneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Zone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneList. +func (in *ZoneList) DeepCopy() *ZoneList { + if in == nil { + return nil + } + out := new(ZoneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZoneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneObservation) DeepCopyInto(out *ZoneObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateNetworks != nil { + in, out := &in.PrivateNetworks, &out.PrivateNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Public != nil { + in, out := &in.Public, &out.Public + *out = new(bool) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneObservation. +func (in *ZoneObservation) DeepCopy() *ZoneObservation { + if in == nil { + return nil + } + out := new(ZoneObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZoneParameters) DeepCopyInto(out *ZoneParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateNetworks != nil { + in, out := &in.PrivateNetworks, &out.PrivateNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateNetworksRefs != nil { + in, out := &in.PrivateNetworksRefs, &out.PrivateNetworksRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateNetworksSelector != nil { + in, out := &in.PrivateNetworksSelector, &out.PrivateNetworksSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Public != nil { + in, out := &in.Public, &out.Public + *out = new(bool) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneParameters. +func (in *ZoneParameters) DeepCopy() *ZoneParameters { + if in == nil { + return nil + } + out := new(ZoneParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneSpec) DeepCopyInto(out *ZoneSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneSpec. +func (in *ZoneSpec) DeepCopy() *ZoneSpec { + if in == nil { + return nil + } + out := new(ZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneStatus) DeepCopyInto(out *ZoneStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneStatus. 
+func (in *ZoneStatus) DeepCopy() *ZoneStatus { + if in == nil { + return nil + } + out := new(ZoneStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dns/v1alpha1/zz_generated.resolvers.go b/apis/dns/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..0f844d6 --- /dev/null +++ b/apis/dns/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,172 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Recordset. +func (mg *Recordset) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ZoneID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ZoneIDRef, + Selector: mg.Spec.ForProvider.ZoneIDSelector, + To: reference.To{ + List: &ZoneList{}, + Managed: &Zone{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ZoneID") + } + mg.Spec.ForProvider.ZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ZoneIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ZoneID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ZoneIDRef, + Selector: mg.Spec.InitProvider.ZoneIDSelector, + To: reference.To{ + List: &ZoneList{}, + Managed: &Zone{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ZoneID") + } + mg.Spec.InitProvider.ZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ZoneIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Zone. 
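// Editor's note: a sketch of what the resolvers in this file enable (assumed
// usage, not generated code). A Recordset can name its Zone by Kubernetes
// object reference instead of a literal ID; ResolveReferences then fills
// spec.forProvider.zoneId from the referenced Zone's external name. The
// object name "prod-zone" and the v1 alias (crossplane-runtime's common/v1
// package) are illustrative.

func exampleResolve(ctx context.Context, c client.Reader, rs *Recordset) error {
	rs.Spec.ForProvider.ZoneIDRef = &v1.Reference{Name: "prod-zone"}
	// On success, rs.Spec.ForProvider.ZoneID holds the resolved value and
	// ZoneIDRef records which object satisfied it.
	return rs.ResolveReferences(ctx, c)
}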
+func (mg *Zone) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.PrivateNetworks), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.PrivateNetworksRefs, + Selector: mg.Spec.ForProvider.PrivateNetworksSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateNetworks") + } + mg.Spec.ForProvider.PrivateNetworks = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.PrivateNetworksRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.PrivateNetworks), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.PrivateNetworksRefs, + Selector: mg.Spec.InitProvider.PrivateNetworksSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateNetworks") + } + mg.Spec.InitProvider.PrivateNetworks = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.PrivateNetworksRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this ZoneIAMBinding. 
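// Editor's note: unlike the Recordset and Zone resolvers above, which use
// reference.ExternalName() (the crossplane.io/external-name annotation), the
// ZoneIAMBinding resolver below extracts with resource.ExtractResourceID(),
// i.e. the referenced Zone's status.atProvider.id as returned by GetID().
// A rough equivalent of that lookup, for illustration only:

func exampleZoneID(z *Zone) string {
	if z.Status.AtProvider.ID == nil {
		return "" // mirrors the nil guard in Zone.GetID()
	}
	return *z.Status.AtProvider.ID
}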
+func (mg *ZoneIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DNSZoneID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DNSZoneIDRef, + Selector: mg.Spec.ForProvider.DNSZoneIDSelector, + To: reference.To{ + List: &ZoneList{}, + Managed: &Zone{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DNSZoneID") + } + mg.Spec.ForProvider.DNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DNSZoneIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DNSZoneID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DNSZoneIDRef, + Selector: mg.Spec.InitProvider.DNSZoneIDSelector, + To: reference.To{ + List: &ZoneList{}, + Managed: &Zone{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DNSZoneID") + } + mg.Spec.InitProvider.DNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DNSZoneIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dns/v1alpha1/zz_groupversion_info.go b/apis/dns/v1alpha1/zz_groupversion_info.go index 0492f48..2b3a31b 100755 --- a/apis/dns/v1alpha1/zz_groupversion_info.go +++ b/apis/dns/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/dns/v1alpha1/zz_recordset_terraformed.go b/apis/dns/v1alpha1/zz_recordset_terraformed.go index 8841165..85b3943 100755 --- a/apis/dns/v1alpha1/zz_recordset_terraformed.go +++ b/apis/dns/v1alpha1/zz_recordset_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
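// Editor's note on GetMergedParameters further down in this file: mergo runs
// with c.Overwrite = false, so forProvider values win over initProvider
// values and only missing keys are filled in. A simplified, non-recursive
// rendering of that merge with hypothetical values:

func exampleMergeSemantics() map[string]any {
	params := map[string]any{"ttl": 300.0}                  // from forProvider
	initParams := map[string]any{"ttl": 600.0, "type": "A"} // from initProvider
	for k, v := range initParams {
		if _, ok := params[k]; !ok {
			params[k] = v // fill gaps only, never overwrite
		}
	}
	return params // {"ttl": 300, "type": "A"}
}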
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Recordset func (mg *Recordset) GetTerraformResourceType() string { - return "yandex_dns_recordset" + return "yandex_dns_recordset" } // GetConnectionDetailsMapping for this Recordset func (tr *Recordset) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Recordset func (tr *Recordset) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Recordset func (tr *Recordset) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Recordset func (tr *Recordset) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Recordset func (tr *Recordset) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Recordset func (tr *Recordset) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Recordset func (tr *Recordset) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Recordset func (tr *Recordset) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Recordset using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Recordset) LateInitialize(attrs []byte) (bool, error) { - params := &RecordsetParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &RecordsetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Recordset) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/dns/v1alpha1/zz_recordset_types.go b/apis/dns/v1alpha1/zz_recordset_types.go index 3cc3d7a..8dbe4b3 100755 --- a/apis/dns/v1alpha1/zz_recordset_types.go +++ b/apis/dns/v1alpha1/zz_recordset_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,103 +7,94 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type RecordsetInitParameters struct { + // The string data for the records in this record set. + // +listType=set + Data []*string `json:"data,omitempty" tf:"data,omitempty"` -// The string data for the records in this record set. -// +listType=set -Data []*string `json:"data,omitempty" tf:"data,omitempty"` + // The DNS name this record set will apply to. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The DNS name this record set will apply to. 
-Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The time-to-live of this record set (seconds). + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` -// The time-to-live of this record set (seconds). -TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + // The DNS record set type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// The DNS record set type. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // The id of the zone in which this record set will reside. + // +crossplane:generate:reference:type=Zone + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` -// The id of the zone in which this record set will reside. -// +crossplane:generate:reference:type=Zone -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // Reference to a Zone to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDRef *v1.Reference `json:"zoneIdRef,omitempty" tf:"-"` -// Reference to a Zone to populate zoneId. -// +kubebuilder:validation:Optional -ZoneIDRef *v1.Reference `json:"zoneIdRef,omitempty" tf:"-"` - -// Selector for a Zone to populate zoneId. -// +kubebuilder:validation:Optional -ZoneIDSelector *v1.Selector `json:"zoneIdSelector,omitempty" tf:"-"` + // Selector for a Zone to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDSelector *v1.Selector `json:"zoneIdSelector,omitempty" tf:"-"` } - type RecordsetObservation struct { + // The string data for the records in this record set. + // +listType=set + Data []*string `json:"data,omitempty" tf:"data,omitempty"` -// The string data for the records in this record set. -// +listType=set -Data []*string `json:"data,omitempty" tf:"data,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// The DNS name this record set will apply to. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The DNS name this record set will apply to. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The time-to-live of this record set (seconds). -TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + // The time-to-live of this record set (seconds). + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` -// The DNS record set type. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // The DNS record set type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// The id of the zone in which this record set will reside. -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // The id of the zone in which this record set will reside. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type RecordsetParameters struct { + // The string data for the records in this record set. + // +kubebuilder:validation:Optional + // +listType=set + Data []*string `json:"data,omitempty" tf:"data,omitempty"` -// The string data for the records in this record set. -// +kubebuilder:validation:Optional -// +listType=set -Data []*string `json:"data,omitempty" tf:"data,omitempty"` - -// The DNS name this record set will apply to. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The DNS name this record set will apply to. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The time-to-live of this record set (seconds). -// +kubebuilder:validation:Optional -TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + // The time-to-live of this record set (seconds). 
+ // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` -// The DNS record set type. -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // The DNS record set type. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// The id of the zone in which this record set will reside. -// +crossplane:generate:reference:type=Zone -// +kubebuilder:validation:Optional -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // The id of the zone in which this record set will reside. + // +crossplane:generate:reference:type=Zone + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` -// Reference to a Zone to populate zoneId. -// +kubebuilder:validation:Optional -ZoneIDRef *v1.Reference `json:"zoneIdRef,omitempty" tf:"-"` + // Reference to a Zone to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDRef *v1.Reference `json:"zoneIdRef,omitempty" tf:"-"` -// Selector for a Zone to populate zoneId. -// +kubebuilder:validation:Optional -ZoneIDSelector *v1.Selector `json:"zoneIdSelector,omitempty" tf:"-"` + // Selector for a Zone to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDSelector *v1.Selector `json:"zoneIdSelector,omitempty" tf:"-"` } // RecordsetSpec defines the desired state of Recordset type RecordsetSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider RecordsetParameters `json:"forProvider"` + ForProvider RecordsetParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -116,20 +105,19 @@ type RecordsetSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider RecordsetInitParameters `json:"initProvider,omitempty"` + InitProvider RecordsetInitParameters `json:"initProvider,omitempty"` } // RecordsetStatus defines the observed state of Recordset. type RecordsetStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider RecordsetObservation `json:"atProvider,omitempty"` + AtProvider RecordsetObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Recordset is the Schema for the Recordsets API. Manages a DNS Recordset within Yandex.Cloud. 
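// Editor's note: the XValidation rules just below make data, name, ttl and
// type required only when the management policies imply create or update. A
// Go rendering of one rule's boolean logic (a sketch; the real check is CEL
// evaluated at admission time):

func exampleDataRuleHolds(policies []string, hasForProviderData, hasInitProviderData bool) bool {
	needs := false
	for _, p := range policies {
		if p == "*" || p == "Create" || p == "Update" {
			needs = true
		}
	}
	return !needs || hasForProviderData || hasInitProviderData
}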
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -139,12 +127,12 @@ type RecordsetStatus struct { type Recordset struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.data) || (has(self.initProvider) && has(self.initProvider.data))",message="spec.forProvider.data is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ttl) || (has(self.initProvider) && has(self.initProvider.ttl))",message="spec.forProvider.ttl is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" - Spec RecordsetSpec `json:"spec"` - Status RecordsetStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.data) || (has(self.initProvider) && has(self.initProvider.data))",message="spec.forProvider.data is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ttl) || (has(self.initProvider) && has(self.initProvider.ttl))",message="spec.forProvider.ttl is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec RecordsetSpec `json:"spec"` + Status RecordsetStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/dns/v1alpha1/zz_zone_terraformed.go b/apis/dns/v1alpha1/zz_zone_terraformed.go index 20750f2..24a938b 100755 --- a/apis/dns/v1alpha1/zz_zone_terraformed.go +++ b/apis/dns/v1alpha1/zz_zone_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
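// Editor's note on LateInitialize below: it unmarshals the observed Terraform
// state into ZoneParameters and copies values into spec.forProvider fields
// the user left unset, reporting whether the spec changed. The per-field rule,
// sketched on a hypothetical *string field:

func exampleLateInitField(spec, observed *string) (*string, bool) {
	if spec == nil && observed != nil {
		return observed, true // adopt the observed value
	}
	return spec, false // user-set values are never overwritten
}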
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Zone func (mg *Zone) GetTerraformResourceType() string { - return "yandex_dns_zone" + return "yandex_dns_zone" } // GetConnectionDetailsMapping for this Zone func (tr *Zone) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Zone func (tr *Zone) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Zone func (tr *Zone) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Zone func (tr *Zone) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Zone func (tr *Zone) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Zone func (tr *Zone) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Zone func (tr *Zone) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Zone func (tr *Zone) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. 
- err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Zone using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Zone) LateInitialize(attrs []byte) (bool, error) { - params := &ZoneParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &ZoneParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Zone) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/dns/v1alpha1/zz_zone_types.go b/apis/dns/v1alpha1/zz_zone_types.go index 70a15eb..8fca5cb 100755 --- a/apis/dns/v1alpha1/zz_zone_types.go +++ b/apis/dns/v1alpha1/zz_zone_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,183 +7,174 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ZoneInitParameters struct { - -// (Boolean) Flag that protects the dns zone from accidental deletion. -// Flag that protects the dns zone from accidental deletion. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - -// (String) Description of the DNS zone. -// Description of the DNS zone. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used. -// ID of the folder to create a zone in. If it is not provided, the default provider folder is used. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` - -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` - -// (Map of String) A set of key/value label pairs to assign to the DNS zone. -// A set of key/value label pairs to assign to the DNS zone. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// (String) User assigned name of a specific resource. Must be unique within the folder. -// User assigned name of a specific resource. Must be unique within the folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. -// For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -// +listType=set -PrivateNetworks []*string `json:"privateNetworks,omitempty" tf:"private_networks,omitempty"` - -// References to Network in vpc to populate privateNetworks. -// +kubebuilder:validation:Optional -PrivateNetworksRefs []v1.Reference `json:"privateNetworksRefs,omitempty" tf:"-"` - -// Selector for a list of Network in vpc to populate privateNetworks. -// +kubebuilder:validation:Optional -PrivateNetworksSelector *v1.Selector `json:"privateNetworksSelector,omitempty" tf:"-"` - -// (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. -// The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. -Public *bool `json:"public,omitempty" tf:"public,omitempty"` - -// (String) The DNS name of this zone, e.g. "example.com.". Must ends with dot. -// The DNS name of this zone, e.g. "example.com.". Must ends with dot. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // (Boolean) Flag that protects the dns zone from accidental deletion. + // Flag that protects the dns zone from accidental deletion. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // (String) Description of the DNS zone. + // Description of the DNS zone. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used. + // ID of the folder to create a zone in. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+ // (Map of String) A set of key/value label pairs to assign to the DNS zone.
+ // A set of key/value label pairs to assign to the DNS zone.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // (String) User assigned name of a specific resource. Must be unique within the folder.
+ // User assigned name of a specific resource. Must be unique within the folder.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.
+ // For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+ // +listType=set
+ PrivateNetworks []*string `json:"privateNetworks,omitempty" tf:"private_networks,omitempty"`
+
+ // References to Network in vpc to populate privateNetworks.
+ // +kubebuilder:validation:Optional
+ PrivateNetworksRefs []v1.Reference `json:"privateNetworksRefs,omitempty" tf:"-"`
+
+ // Selector for a list of Network in vpc to populate privateNetworks.
+ // +kubebuilder:validation:Optional
+ PrivateNetworksSelector *v1.Selector `json:"privateNetworksSelector,omitempty" tf:"-"`
+
+ // (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.
+ // The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.
+ Public *bool `json:"public,omitempty" tf:"public,omitempty"`
+
+ // (String) The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+ // The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
}

-
type ZoneObservation struct {
+ // (String) The DNS zone creation timestamp.
+ // The DNS zone creation timestamp.
+ CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-// (String) The DNS zone creation timestamp.
-// The DNS zone creation timestamp.
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-
-// (Boolean) Flag that protects the dns zone from accidental deletion.
-// Flag that protects the dns zone from accidental deletion.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+ // (Boolean) Flag that protects the dns zone from accidental deletion.
+ // Flag that protects the dns zone from accidental deletion.
+ DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
-// (String) Description of the DNS zone.
-// Description of the DNS zone.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+ // (String) Description of the DNS zone.
+ // Description of the DNS zone.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used.
+ // ID of the folder to create a zone in. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// (String) The ID of this resource. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// (Map of String) A set of key/value label pairs to assign to the DNS zone. -// A set of key/value label pairs to assign to the DNS zone. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // (Map of String) A set of key/value label pairs to assign to the DNS zone. + // A set of key/value label pairs to assign to the DNS zone. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// (String) User assigned name of a specific resource. Must be unique within the folder. -// User assigned name of a specific resource. Must be unique within the folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) User assigned name of a specific resource. Must be unique within the folder. + // User assigned name of a specific resource. Must be unique within the folder. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. -// For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. -// +listType=set -PrivateNetworks []*string `json:"privateNetworks,omitempty" tf:"private_networks,omitempty"` + // (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + // For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + // +listType=set + PrivateNetworks []*string `json:"privateNetworks,omitempty" tf:"private_networks,omitempty"` -// (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. -// The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. -Public *bool `json:"public,omitempty" tf:"public,omitempty"` + // (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. + // The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. + Public *bool `json:"public,omitempty" tf:"public,omitempty"` -// (String) The DNS name of this zone, e.g. "example.com.". Must ends with dot. -// The DNS name of this zone, e.g. "example.com.". Must ends with dot. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // (String) The DNS name of this zone, e.g. "example.com.". Must ends with dot. + // The DNS name of this zone, e.g. "example.com.". Must ends with dot. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type ZoneParameters struct { - -// (Boolean) Flag that protects the dns zone from accidental deletion. -// Flag that protects the dns zone from accidental deletion. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - -// (String) Description of the DNS zone. -// Description of the DNS zone. 
-// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used. -// ID of the folder to create a zone in. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` - -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` - -// (Map of String) A set of key/value label pairs to assign to the DNS zone. -// A set of key/value label pairs to assign to the DNS zone. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// (String) User assigned name of a specific resource. Must be unique within the folder. -// User assigned name of a specific resource. Must be unique within the folder. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. -// For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -// +kubebuilder:validation:Optional -// +listType=set -PrivateNetworks []*string `json:"privateNetworks,omitempty" tf:"private_networks,omitempty"` - -// References to Network in vpc to populate privateNetworks. -// +kubebuilder:validation:Optional -PrivateNetworksRefs []v1.Reference `json:"privateNetworksRefs,omitempty" tf:"-"` - -// Selector for a list of Network in vpc to populate privateNetworks. -// +kubebuilder:validation:Optional -PrivateNetworksSelector *v1.Selector `json:"privateNetworksSelector,omitempty" tf:"-"` - -// (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. -// The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. -// +kubebuilder:validation:Optional -Public *bool `json:"public,omitempty" tf:"public,omitempty"` - -// (String) The DNS name of this zone, e.g. "example.com.". Must ends with dot. -// The DNS name of this zone, e.g. "example.com.". Must ends with dot. -// +kubebuilder:validation:Optional -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // (Boolean) Flag that protects the dns zone from accidental deletion. + // Flag that protects the dns zone from accidental deletion. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // (String) Description of the DNS zone. + // Description of the DNS zone. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used. 
+ // ID of the folder to create a zone in. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // (Map of String) A set of key/value label pairs to assign to the DNS zone. + // A set of key/value label pairs to assign to the DNS zone. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // (String) User assigned name of a specific resource. Must be unique within the folder. + // User assigned name of a specific resource. Must be unique within the folder. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + // For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + // +listType=set + PrivateNetworks []*string `json:"privateNetworks,omitempty" tf:"private_networks,omitempty"` + + // References to Network in vpc to populate privateNetworks. + // +kubebuilder:validation:Optional + PrivateNetworksRefs []v1.Reference `json:"privateNetworksRefs,omitempty" tf:"-"` + + // Selector for a list of Network in vpc to populate privateNetworks. + // +kubebuilder:validation:Optional + PrivateNetworksSelector *v1.Selector `json:"privateNetworksSelector,omitempty" tf:"-"` + + // (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. + // The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. + // +kubebuilder:validation:Optional + Public *bool `json:"public,omitempty" tf:"public,omitempty"` + + // (String) The DNS name of this zone, e.g. "example.com.". Must ends with dot. + // The DNS name of this zone, e.g. "example.com.". Must ends with dot. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } // ZoneSpec defines the desired state of Zone type ZoneSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ZoneParameters `json:"forProvider"` + ForProvider ZoneParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -196,20 +185,19 @@ type ZoneSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider ZoneInitParameters `json:"initProvider,omitempty"` + InitProvider ZoneInitParameters `json:"initProvider,omitempty"` } // ZoneStatus defines the observed state of Zone. 
type ZoneStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ZoneObservation `json:"atProvider,omitempty"` + AtProvider ZoneObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Zone is the Schema for the Zones API. Manages a DNS Zone within Yandex.Cloud. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -219,9 +207,9 @@ type ZoneStatus struct { type Zone struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" - Spec ZoneSpec `json:"spec"` - Status ZoneStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" + Spec ZoneSpec `json:"spec"` + Status ZoneStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/dns/v1alpha1/zz_zoneiambinding_terraformed.go b/apis/dns/v1alpha1/zz_zoneiambinding_terraformed.go index a5e48fe..084a814 100755 --- a/apis/dns/v1alpha1/zz_zoneiambinding_terraformed.go +++ b/apis/dns/v1alpha1/zz_zoneiambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1

@@ -10,120 +8,118 @@ import (
	"github.com/crossplane/upjet/pkg/resource"
	"github.com/crossplane/upjet/pkg/resource/json"
-
)

// GetTerraformResourceType returns Terraform resource type for this ZoneIAMBinding
func (mg *ZoneIAMBinding) GetTerraformResourceType() string {
-	return "yandex_dns_zone_iam_binding"
+	return "yandex_dns_zone_iam_binding"
}

// GetConnectionDetailsMapping for this ZoneIAMBinding
func (tr *ZoneIAMBinding) GetConnectionDetailsMapping() map[string]string {
-	return nil
+	return nil
}

// GetObservation of this ZoneIAMBinding
func (tr *ZoneIAMBinding) GetObservation() (map[string]any, error) {
-	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
}

// SetObservation for this ZoneIAMBinding
func (tr *ZoneIAMBinding) SetObservation(obs map[string]any) error {
-	p, err := json.TFParser.Marshal(obs)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
}

// GetID returns ID of underlying Terraform resource of this ZoneIAMBinding
func (tr *ZoneIAMBinding) GetID() string {
-	if tr.Status.AtProvider.ID == nil {
-		return ""
-	}
-	return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
}

// GetParameters of this ZoneIAMBinding
func (tr *ZoneIAMBinding) GetParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
}

// SetParameters for this ZoneIAMBinding
func (tr *ZoneIAMBinding) SetParameters(params map[string]any) error {
-	p, err := json.TFParser.Marshal(params)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}

// GetInitParameters of this ZoneIAMBinding
func (tr *ZoneIAMBinding) GetInitParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
}

// GetMergedParameters of this ZoneIAMBinding
func (tr *ZoneIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from
the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this ZoneIAMBinding using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *ZoneIAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &ZoneIAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &ZoneIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *ZoneIAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/dns/v1alpha1/zz_zoneiambinding_types.go b/apis/dns/v1alpha1/zz_zoneiambinding_types.go index 2fbe50a..e921902 100755 --- a/apis/dns/v1alpha1/zz_zoneiambinding_types.go +++ b/apis/dns/v1alpha1/zz_zoneiambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,92 +7,83 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ZoneIAMBindingInitParameters struct { + // The DNS Zone ID to apply a binding to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1.Zone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` -// The DNS Zone ID to apply a binding to. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1.Zone -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + // Reference to a Zone in dns to populate dnsZoneId. + // +kubebuilder:validation:Optional + DNSZoneIDRef *v1.Reference `json:"dnsZoneIdRef,omitempty" tf:"-"` -// Reference to a Zone in dns to populate dnsZoneId. -// +kubebuilder:validation:Optional -DNSZoneIDRef *v1.Reference `json:"dnsZoneIdRef,omitempty" tf:"-"` + // Selector for a Zone in dns to populate dnsZoneId. + // +kubebuilder:validation:Optional + DNSZoneIDSelector *v1.Selector `json:"dnsZoneIdSelector,omitempty" tf:"-"` -// Selector for a Zone in dns to populate dnsZoneId. -// +kubebuilder:validation:Optional -DNSZoneIDSelector *v1.Selector `json:"dnsZoneIdSelector,omitempty" tf:"-"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be applied. See roles. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type ZoneIAMBindingObservation struct { + // The DNS Zone ID to apply a binding to. + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` -// The DNS Zone ID to apply a binding to. -DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be applied. See roles. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type ZoneIAMBindingParameters struct { + // The DNS Zone ID to apply a binding to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1.Zone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` -// The DNS Zone ID to apply a binding to. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1.Zone
-// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
-// +kubebuilder:validation:Optional
-DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
-
-// Reference to a Zone in dns to populate dnsZoneId.
-// +kubebuilder:validation:Optional
-DNSZoneIDRef *v1.Reference `json:"dnsZoneIdRef,omitempty" tf:"-"`
+	// Reference to a Zone in dns to populate dnsZoneId.
+	// +kubebuilder:validation:Optional
+	DNSZoneIDRef *v1.Reference `json:"dnsZoneIdRef,omitempty" tf:"-"`

-// Selector for a Zone in dns to populate dnsZoneId.
-// +kubebuilder:validation:Optional
-DNSZoneIDSelector *v1.Selector `json:"dnsZoneIdSelector,omitempty" tf:"-"`
+	// Selector for a Zone in dns to populate dnsZoneId.
+	// +kubebuilder:validation:Optional
+	DNSZoneIDSelector *v1.Selector `json:"dnsZoneIdSelector,omitempty" tf:"-"`

-// Identities that will be granted the privilege in role. Each entry can have one of the following values:
-// +kubebuilder:validation:Optional
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`

-// The role that should be applied. See roles.
-// +kubebuilder:validation:Optional
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The role that should be applied. See roles.
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`

-// +kubebuilder:validation:Optional
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	// +kubebuilder:validation:Optional
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
}

// ZoneIAMBindingSpec defines the desired state of ZoneIAMBinding
type ZoneIAMBindingSpec struct {
	v1.ResourceSpec `json:",inline"`
-	ForProvider ZoneIAMBindingParameters `json:"forProvider"`
+	ForProvider ZoneIAMBindingParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
@@ -105,20 +94,19 @@ type ZoneIAMBindingSpec struct {
	// required on creation, but we do not desire to update them after creation,
	// for example because an external controller is managing them, like an
	// autoscaler.
-	InitProvider ZoneIAMBindingInitParameters `json:"initProvider,omitempty"`
+	InitProvider ZoneIAMBindingInitParameters `json:"initProvider,omitempty"`
}

// ZoneIAMBindingStatus defines the observed state of ZoneIAMBinding.
type ZoneIAMBindingStatus struct {
	v1.ResourceStatus `json:",inline"`
-	AtProvider ZoneIAMBindingObservation `json:"atProvider,omitempty"`
+	AtProvider ZoneIAMBindingObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
-
// ZoneIAMBinding is the Schema for the ZoneIAMBindings API. 
Allows management of a single IAM binding for a // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -128,10 +116,10 @@ type ZoneIAMBindingStatus struct { type ZoneIAMBinding struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec ZoneIAMBindingSpec `json:"spec"` - Status ZoneIAMBindingStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ZoneIAMBindingSpec `json:"spec"` + Status ZoneIAMBindingStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/function/v1alpha1/zz_generated.conversion_hubs.go b/apis/function/v1alpha1/zz_generated.conversion_hubs.go index dda12ce..2451a4a 100755 --- a/apis/function/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/function/v1alpha1/zz_generated.conversion_hubs.go @@ -1,16 +1,12 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 +// Hub marks this type as a conversion hub. +func (tr *IAMBinding) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *IAMBinding) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *ScalingPolicy) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *Trigger) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *ScalingPolicy) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *Trigger) Hub() {} diff --git a/apis/function/v1alpha1/zz_generated.deepcopy.go b/apis/function/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b505208 --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2546 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. +func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. +func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRegistryInitParameters) DeepCopyInto(out *ContainerRegistryInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.CreateImage != nil { + in, out := &in.CreateImage, &out.CreateImage + *out = new(bool) + **out = **in + } + if in.CreateImageTag != nil { + in, out := &in.CreateImageTag, &out.CreateImageTag + *out = new(bool) + **out = **in + } + if in.DeleteImage != nil { + in, out := &in.DeleteImage, &out.DeleteImage + *out = new(bool) + **out = **in + } + if in.DeleteImageTag != nil { + in, out := &in.DeleteImageTag, &out.DeleteImageTag + *out = new(bool) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryInitParameters. +func (in *ContainerRegistryInitParameters) DeepCopy() *ContainerRegistryInitParameters { + if in == nil { + return nil + } + out := new(ContainerRegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRegistryObservation) DeepCopyInto(out *ContainerRegistryObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.CreateImage != nil { + in, out := &in.CreateImage, &out.CreateImage + *out = new(bool) + **out = **in + } + if in.CreateImageTag != nil { + in, out := &in.CreateImageTag, &out.CreateImageTag + *out = new(bool) + **out = **in + } + if in.DeleteImage != nil { + in, out := &in.DeleteImage, &out.DeleteImage + *out = new(bool) + **out = **in + } + if in.DeleteImageTag != nil { + in, out := &in.DeleteImageTag, &out.DeleteImageTag + *out = new(bool) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryObservation. +func (in *ContainerRegistryObservation) DeepCopy() *ContainerRegistryObservation { + if in == nil { + return nil + } + out := new(ContainerRegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRegistryParameters) DeepCopyInto(out *ContainerRegistryParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.CreateImage != nil { + in, out := &in.CreateImage, &out.CreateImage + *out = new(bool) + **out = **in + } + if in.CreateImageTag != nil { + in, out := &in.CreateImageTag, &out.CreateImageTag + *out = new(bool) + **out = **in + } + if in.DeleteImage != nil { + in, out := &in.DeleteImage, &out.DeleteImage + *out = new(bool) + **out = **in + } + if in.DeleteImageTag != nil { + in, out := &in.DeleteImageTag, &out.DeleteImageTag + *out = new(bool) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryParameters. +func (in *ContainerRegistryParameters) DeepCopy() *ContainerRegistryParameters { + if in == nil { + return nil + } + out := new(ContainerRegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamsInitParameters) DeepCopyInto(out *DataStreamsInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamsInitParameters. +func (in *DataStreamsInitParameters) DeepCopy() *DataStreamsInitParameters { + if in == nil { + return nil + } + out := new(DataStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamsObservation) DeepCopyInto(out *DataStreamsObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamsObservation. 
+func (in *DataStreamsObservation) DeepCopy() *DataStreamsObservation { + if in == nil { + return nil + } + out := new(DataStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamsParameters) DeepCopyInto(out *DataStreamsParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamsParameters. +func (in *DataStreamsParameters) DeepCopy() *DataStreamsParameters { + if in == nil { + return nil + } + out := new(DataStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DlqInitParameters) DeepCopyInto(out *DlqInitParameters) { + *out = *in + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DlqInitParameters. +func (in *DlqInitParameters) DeepCopy() *DlqInitParameters { + if in == nil { + return nil + } + out := new(DlqInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DlqObservation) DeepCopyInto(out *DlqObservation) { + *out = *in + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DlqObservation. +func (in *DlqObservation) DeepCopy() *DlqObservation { + if in == nil { + return nil + } + out := new(DlqObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DlqParameters) DeepCopyInto(out *DlqParameters) { + *out = *in + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DlqParameters. +func (in *DlqParameters) DeepCopy() *DlqParameters { + if in == nil { + return nil + } + out := new(DlqParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. +func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionObservation. +func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBinding) DeepCopyInto(out *IAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBinding. 
+func (in *IAMBinding) DeepCopy() *IAMBinding { + if in == nil { + return nil + } + out := new(IAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingInitParameters) DeepCopyInto(out *IAMBindingInitParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingInitParameters. +func (in *IAMBindingInitParameters) DeepCopy() *IAMBindingInitParameters { + if in == nil { + return nil + } + out := new(IAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingList) DeepCopyInto(out *IAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingList. +func (in *IAMBindingList) DeepCopy() *IAMBindingList { + if in == nil { + return nil + } + out := new(IAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingObservation) DeepCopyInto(out *IAMBindingObservation) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingObservation. 
+func (in *IAMBindingObservation) DeepCopy() *IAMBindingObservation { + if in == nil { + return nil + } + out := new(IAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingParameters) DeepCopyInto(out *IAMBindingParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingParameters. +func (in *IAMBindingParameters) DeepCopy() *IAMBindingParameters { + if in == nil { + return nil + } + out := new(IAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingSpec) DeepCopyInto(out *IAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingSpec. +func (in *IAMBindingSpec) DeepCopy() *IAMBindingSpec { + if in == nil { + return nil + } + out := new(IAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingStatus) DeepCopyInto(out *IAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingStatus. +func (in *IAMBindingStatus) DeepCopy() *IAMBindingStatus { + if in == nil { + return nil + } + out := new(IAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotInitParameters) DeepCopyInto(out *IotInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.DeviceID != nil { + in, out := &in.DeviceID, &out.DeviceID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotInitParameters. +func (in *IotInitParameters) DeepCopy() *IotInitParameters { + if in == nil { + return nil + } + out := new(IotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IotObservation) DeepCopyInto(out *IotObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.DeviceID != nil { + in, out := &in.DeviceID, &out.DeviceID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotObservation. +func (in *IotObservation) DeepCopy() *IotObservation { + if in == nil { + return nil + } + out := new(IotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotParameters) DeepCopyInto(out *IotParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.DeviceID != nil { + in, out := &in.DeviceID, &out.DeviceID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotParameters. +func (in *IotParameters) DeepCopy() *IotParameters { + if in == nil { + return nil + } + out := new(IotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogGroupInitParameters) DeepCopyInto(out *LogGroupInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.LogGroupIds != nil { + in, out := &in.LogGroupIds, &out.LogGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogGroupInitParameters. +func (in *LogGroupInitParameters) DeepCopy() *LogGroupInitParameters { + if in == nil { + return nil + } + out := new(LogGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogGroupObservation) DeepCopyInto(out *LogGroupObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.LogGroupIds != nil { + in, out := &in.LogGroupIds, &out.LogGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogGroupObservation. +func (in *LogGroupObservation) DeepCopy() *LogGroupObservation { + if in == nil { + return nil + } + out := new(LogGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogGroupParameters) DeepCopyInto(out *LogGroupParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.LogGroupIds != nil { + in, out := &in.LogGroupIds, &out.LogGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogGroupParameters. +func (in *LogGroupParameters) DeepCopy() *LogGroupParameters { + if in == nil { + return nil + } + out := new(LogGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. 
+func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. +func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. 
+func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MailInitParameters) DeepCopyInto(out *MailInitParameters) { + *out = *in + if in.AttachmentsBucketID != nil { + in, out := &in.AttachmentsBucketID, &out.AttachmentsBucketID + *out = new(string) + **out = **in + } + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MailInitParameters. +func (in *MailInitParameters) DeepCopy() *MailInitParameters { + if in == nil { + return nil + } + out := new(MailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MailObservation) DeepCopyInto(out *MailObservation) { + *out = *in + if in.AttachmentsBucketID != nil { + in, out := &in.AttachmentsBucketID, &out.AttachmentsBucketID + *out = new(string) + **out = **in + } + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MailObservation. +func (in *MailObservation) DeepCopy() *MailObservation { + if in == nil { + return nil + } + out := new(MailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MailParameters) DeepCopyInto(out *MailParameters) { + *out = *in + if in.AttachmentsBucketID != nil { + in, out := &in.AttachmentsBucketID, &out.AttachmentsBucketID + *out = new(string) + **out = **in + } + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MailParameters. +func (in *MailParameters) DeepCopy() *MailParameters { + if in == nil { + return nil + } + out := new(MailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
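A note on the overall shape of these generated methods: `*out = *in` performs a shallow struct copy, which duplicates value fields but leaves pointer fields aliased; each subsequent `if` branch then re-points one field at a fresh allocation. A simplified sketch using a hypothetical two-field stand-in for types like MailParameters:

```go
package main

import "fmt"

// MailParams is a hypothetical, stripped-down stand-in for MailParameters.
type MailParams struct {
	BatchSize        *string
	ServiceAccountID *string
}

// deepCopyInto mimics the generated pattern: shallow copy first, then a
// fresh allocation for each non-nil pointer field.
func (in *MailParams) deepCopyInto(out *MailParams) {
	*out = *in // value fields copied; pointer fields still aliased at this point
	if in.BatchSize != nil {
		out.BatchSize = new(string)
		*out.BatchSize = *in.BatchSize
	}
	if in.ServiceAccountID != nil {
		out.ServiceAccountID = new(string)
		*out.ServiceAccountID = *in.ServiceAccountID
	}
}

func main() {
	size := "10"
	src := MailParams{BatchSize: &size}
	var dst MailParams
	src.deepCopyInto(&dst)
	*src.BatchSize = "99"
	fmt.Println(*dst.BatchSize) // 10 — mutating the source no longer leaks into the copy
}
```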
+func (in *MessageQueueInitParameters) DeepCopyInto(out *MessageQueueInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.VisibilityTimeout != nil { + in, out := &in.VisibilityTimeout, &out.VisibilityTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageQueueInitParameters. +func (in *MessageQueueInitParameters) DeepCopy() *MessageQueueInitParameters { + if in == nil { + return nil + } + out := new(MessageQueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageQueueObservation) DeepCopyInto(out *MessageQueueObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.VisibilityTimeout != nil { + in, out := &in.VisibilityTimeout, &out.VisibilityTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageQueueObservation. +func (in *MessageQueueObservation) DeepCopy() *MessageQueueObservation { + if in == nil { + return nil + } + out := new(MessageQueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageQueueParameters) DeepCopyInto(out *MessageQueueParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.VisibilityTimeout != nil { + in, out := &in.VisibilityTimeout, &out.VisibilityTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageQueueParameters. +func (in *MessageQueueParameters) DeepCopy() *MessageQueueParameters { + if in == nil { + return nil + } + out := new(MessageQueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageInitParameters) DeepCopyInto(out *ObjectStorageInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.BucketID != nil { + in, out := &in.BucketID, &out.BucketID + *out = new(string) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageInitParameters. +func (in *ObjectStorageInitParameters) DeepCopy() *ObjectStorageInitParameters { + if in == nil { + return nil + } + out := new(ObjectStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageObservation) DeepCopyInto(out *ObjectStorageObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.BucketID != nil { + in, out := &in.BucketID, &out.BucketID + *out = new(string) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageObservation. +func (in *ObjectStorageObservation) DeepCopy() *ObjectStorageObservation { + if in == nil { + return nil + } + out := new(ObjectStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageParameters) DeepCopyInto(out *ObjectStorageParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.BucketID != nil { + in, out := &in.BucketID, &out.BucketID + *out = new(string) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageParameters. +func (in *ObjectStorageParameters) DeepCopy() *ObjectStorageParameters { + if in == nil { + return nil + } + out := new(ObjectStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.ZoneInstancesLimit != nil { + in, out := &in.ZoneInstancesLimit, &out.ZoneInstancesLimit + *out = new(float64) + **out = **in + } + if in.ZoneRequestsLimit != nil { + in, out := &in.ZoneRequestsLimit, &out.ZoneRequestsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.ZoneInstancesLimit != nil { + in, out := &in.ZoneInstancesLimit, &out.ZoneInstancesLimit + *out = new(float64) + **out = **in + } + if in.ZoneRequestsLimit != nil { + in, out := &in.ZoneRequestsLimit, &out.ZoneRequestsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.ZoneInstancesLimit != nil { + in, out := &in.ZoneInstancesLimit, &out.ZoneInstancesLimit + *out = new(float64) + **out = **in + } + if in.ZoneRequestsLimit != nil { + in, out := &in.ZoneRequestsLimit, &out.ZoneRequestsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicy) DeepCopyInto(out *ScalingPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicy. +func (in *ScalingPolicy) DeepCopy() *ScalingPolicy { + if in == nil { + return nil + } + out := new(ScalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyInitParameters) DeepCopyInto(out *ScalingPolicyInitParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyInitParameters. +func (in *ScalingPolicyInitParameters) DeepCopy() *ScalingPolicyInitParameters { + if in == nil { + return nil + } + out := new(ScalingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyList) DeepCopyInto(out *ScalingPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalingPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyList. +func (in *ScalingPolicyList) DeepCopy() *ScalingPolicyList { + if in == nil { + return nil + } + out := new(ScalingPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
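`DeepCopyObject` is what lets ScalingPolicy and ScalingPolicyList satisfy Kubernetes' `runtime.Object` interface. Its practical role, sketched under the assumption of a standard controller-runtime setup (`mutableCopy` is a hypothetical caller, not generated code): objects served from an informer cache are shared and must never be mutated in place, so callers copy through the interface before editing.

```go
package example

import "k8s.io/apimachinery/pkg/runtime"

// mutableCopy works on any runtime.Object, i.e. even where the concrete
// type has been erased. For a *ScalingPolicy input, DeepCopyObject returns
// a fresh *ScalingPolicy that is safe to mutate.
func mutableCopy(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}
```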
+func (in *ScalingPolicyObservation) DeepCopyInto(out *ScalingPolicyObservation) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyObservation. +func (in *ScalingPolicyObservation) DeepCopy() *ScalingPolicyObservation { + if in == nil { + return nil + } + out := new(ScalingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyParameters) DeepCopyInto(out *ScalingPolicyParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyParameters. +func (in *ScalingPolicyParameters) DeepCopy() *ScalingPolicyParameters { + if in == nil { + return nil + } + out := new(ScalingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicySpec) DeepCopyInto(out *ScalingPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicySpec. +func (in *ScalingPolicySpec) DeepCopy() *ScalingPolicySpec { + if in == nil { + return nil + } + out := new(ScalingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyStatus) DeepCopyInto(out *ScalingPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyStatus. +func (in *ScalingPolicyStatus) DeepCopy() *ScalingPolicyStatus { + if in == nil { + return nil + } + out := new(ScalingPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimerInitParameters) DeepCopyInto(out *TimerInitParameters) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimerInitParameters. 
+func (in *TimerInitParameters) DeepCopy() *TimerInitParameters { + if in == nil { + return nil + } + out := new(TimerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimerObservation) DeepCopyInto(out *TimerObservation) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimerObservation. +func (in *TimerObservation) DeepCopy() *TimerObservation { + if in == nil { + return nil + } + out := new(TimerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimerParameters) DeepCopyInto(out *TimerParameters) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimerParameters. +func (in *TimerParameters) DeepCopy() *TimerParameters { + if in == nil { + return nil + } + out := new(TimerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Trigger) DeepCopyInto(out *Trigger) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Trigger. +func (in *Trigger) DeepCopy() *Trigger { + if in == nil { + return nil + } + out := new(Trigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Trigger) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRegistry != nil { + in, out := &in.ContainerRegistry, &out.ContainerRegistry + *out = make([]ContainerRegistryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataStreams != nil { + in, out := &in.DataStreams, &out.DataStreams + *out = make([]DataStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Dlq != nil { + in, out := &in.Dlq, &out.Dlq + *out = make([]DlqInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Iot != nil { + in, out := &in.Iot, &out.Iot + *out = make([]IotInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = make([]LogGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mail != nil { + in, out := &in.Mail, &out.Mail + *out = make([]MailInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MessageQueue != nil { + in, out := &in.MessageQueue, &out.MessageQueue + *out = make([]MessageQueueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = make([]TimerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. 
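The Labels field (`map[string]*string`) gets a slightly more involved copy: a nil map stays nil, nil values are preserved, and every non-nil value is re-pointed at its own string. The generated inVal/outVal indirection boils down to the following hypothetical helper:

```go
package main

import "fmt"

// copyLabels mirrors the generated map[string]*string copy for Labels.
func copyLabels(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		v := *val
		out[key] = &v
	}
	return out
}

func main() {
	env := "prod"
	src := map[string]*string{"env": &env, "team": nil}
	dst := copyLabels(src)
	*src["env"] = "dev"
	fmt.Println(*dst["env"]) // prod — keys and pointees are independent of the source
}
```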
+func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerList) DeepCopyInto(out *TriggerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Trigger, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerList. +func (in *TriggerList) DeepCopy() *TriggerList { + if in == nil { + return nil + } + out := new(TriggerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TriggerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRegistry != nil { + in, out := &in.ContainerRegistry, &out.ContainerRegistry + *out = make([]ContainerRegistryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DataStreams != nil { + in, out := &in.DataStreams, &out.DataStreams + *out = make([]DataStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Dlq != nil { + in, out := &in.Dlq, &out.Dlq + *out = make([]DlqObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Iot != nil { + in, out := &in.Iot, &out.Iot + *out = make([]IotObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = make([]LogGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mail != nil { + in, out := &in.Mail, &out.Mail 
+ *out = make([]MailObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MessageQueue != nil { + in, out := &in.MessageQueue, &out.MessageQueue + *out = make([]MessageQueueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = make([]TimerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRegistry != nil { + in, out := &in.ContainerRegistry, &out.ContainerRegistry + *out = make([]ContainerRegistryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataStreams != nil { + in, out := &in.DataStreams, &out.DataStreams + *out = make([]DataStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Dlq != nil { + in, out := &in.Dlq, &out.Dlq + *out = make([]DlqParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Iot != nil { + in, out := &in.Iot, &out.Iot + *out = make([]IotParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = make([]LogGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mail != 
nil { + in, out := &in.Mail, &out.Mail + *out = make([]MailParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MessageQueue != nil { + in, out := &in.MessageQueue, &out.MessageQueue + *out = make([]MessageQueueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = make([]TimerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. +func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerSpec) DeepCopyInto(out *TriggerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSpec. +func (in *TriggerSpec) DeepCopy() *TriggerSpec { + if in == nil { + return nil + } + out := new(TriggerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerStatus) DeepCopyInto(out *TriggerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatus. +func (in *TriggerStatus) DeepCopy() *TriggerStatus { + if in == nil { + return nil + } + out := new(TriggerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/function/v1alpha1/zz_generated.resolvers.go b/apis/function/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..3e66c4e --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Trigger. 
+func (mg *Trigger) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/function/v1alpha1/zz_groupversion_info.go b/apis/function/v1alpha1/zz_groupversion_info.go index c7edd85..20856ac 100755 --- a/apis/function/v1alpha1/zz_groupversion_info.go +++ b/apis/function/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/function/v1alpha1/zz_iambinding_terraformed.go b/apis/function/v1alpha1/zz_iambinding_terraformed.go index 889de60..9ef4da5 100755 --- a/apis/function/v1alpha1/zz_iambinding_terraformed.go +++ b/apis/function/v1alpha1/zz_iambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
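A hedged usage sketch for the resolver above: the managed-resource reconciler calls `ResolveReferences` before rendering Terraform parameters, so a Trigger can name a Folder object (via `folderIdRef`) or match one by labels (via `folderIdSelector`) instead of hard-coding `folderId`. The object name "my-folder" and the calling context are assumptions, not part of the generated code; imports are those of zz_generated.resolvers.go plus crossplane-runtime's common `v1` package for the Reference type.

```go
func resolveFolderID(ctx context.Context, c client.Reader) (*string, error) {
	tr := &Trigger{}
	tr.Spec.ForProvider.FolderIDRef = &v1.Reference{Name: "my-folder"} // hypothetical Folder object
	if err := tr.ResolveReferences(ctx, c); err != nil {
		return nil, err // e.g. the referenced Folder does not exist yet
	}
	// On success the Folder's external name has been written back into
	// FolderID, and the resolved reference is recorded in FolderIDRef.
	return tr.Spec.ForProvider.FolderID, nil
}
```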
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this IAMBinding func (mg *IAMBinding) GetTerraformResourceType() string { - return "yandex_function_iam_binding" + return "yandex_function_iam_binding" } // GetConnectionDetailsMapping for this IAMBinding func (tr *IAMBinding) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this IAMBinding func (tr *IAMBinding) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this IAMBinding func (tr *IAMBinding) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this IAMBinding func (tr *IAMBinding) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this IAMBinding func (tr *IAMBinding) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this IAMBinding func (tr *IAMBinding) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this IAMBinding func (tr *IAMBinding) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this IAMBinding func (tr *IAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this IAMBinding using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *IAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &IAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &IAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *IAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/function/v1alpha1/zz_iambinding_types.go b/apis/function/v1alpha1/zz_iambinding_types.go index ff98e65..8c1ee9e 100755 --- a/apis/function/v1alpha1/zz_iambinding_types.go +++ b/apis/function/v1alpha1/zz_iambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,72 +7,63 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type IAMBindingInitParameters struct { + // The Yandex Cloud Function ID to apply a binding to. + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` -// The Yandex Cloud Function ID to apply a binding to. -FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + // Identities that will be granted the privilege in role. 
Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // The role that should be applied. See roles + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be applied. See roles -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type IAMBindingObservation struct { + // The Yandex Cloud Function ID to apply a binding to. + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` -// The Yandex Cloud Function ID to apply a binding to. -FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be applied. See roles -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. See roles + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type IAMBindingParameters struct { + // The Yandex Cloud Function ID to apply a binding to. + // +kubebuilder:validation:Optional + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` -// The Yandex Cloud Function ID to apply a binding to. -// +kubebuilder:validation:Optional -FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` - -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +kubebuilder:validation:Optional -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be applied. See roles -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. See roles + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // IAMBindingSpec defines the desired state of IAMBinding type IAMBindingSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider IAMBindingParameters `json:"forProvider"` + ForProvider IAMBindingParameters `json:"forProvider"` // THIS IS A BETA FIELD. 
It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -85,20 +74,19 @@ type IAMBindingSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider IAMBindingInitParameters `json:"initProvider,omitempty"` + InitProvider IAMBindingInitParameters `json:"initProvider,omitempty"` } // IAMBindingStatus defines the observed state of IAMBinding. type IAMBindingStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider IAMBindingObservation `json:"atProvider,omitempty"` + AtProvider IAMBindingObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // IAMBinding is the Schema for the IAMBindings API. Allows management of a single IAM binding for a // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -108,11 +96,11 @@ type IAMBindingStatus struct { type IAMBinding struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionId) || (has(self.initProvider) && has(self.initProvider.functionId))",message="spec.forProvider.functionId is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec IAMBindingSpec `json:"spec"` - Status IAMBindingStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionId) || (has(self.initProvider) && has(self.initProvider.functionId))",message="spec.forProvider.functionId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec IAMBindingSpec `json:"spec"` + Status IAMBindingStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go 
b/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go index 1bbb91a..a6ea42e 100755 --- a/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go +++ b/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this ScalingPolicy func (mg *ScalingPolicy) GetTerraformResourceType() string { - return "yandex_function_scaling_policy" + return "yandex_function_scaling_policy" } // GetConnectionDetailsMapping for this ScalingPolicy func (tr *ScalingPolicy) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this ScalingPolicy func (tr *ScalingPolicy) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this ScalingPolicy func (tr *ScalingPolicy) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this ScalingPolicy func (tr *ScalingPolicy) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this ScalingPolicy func (tr *ScalingPolicy) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this ScalingPolicy func (tr *ScalingPolicy) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this ScalingPolicy func (tr *ScalingPolicy) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this ScalingPolicy func (tr *ScalingPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return 
params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this ScalingPolicy using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *ScalingPolicy) LateInitialize(attrs []byte) (bool, error) { - params := &ScalingPolicyParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &ScalingPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *ScalingPolicy) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/function/v1alpha1/zz_scalingpolicy_types.go b/apis/function/v1alpha1/zz_scalingpolicy_types.go index 5a8ec59..bb4ccd2 100755 --- a/apis/function/v1alpha1/zz_scalingpolicy_types.go +++ b/apis/function/v1alpha1/zz_scalingpolicy_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT.
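The GetMergedParameters implementations above (and below, for Trigger) all follow one pattern: spec.forProvider is the base, and spec.initProvider only contributes fields, and slice elements, that forProvider leaves unset. A minimal standalone sketch of that merge behavior; it assumes the github.com/imdario/mergo module path (the provider may pin dario.cat/mergo instead), and the keys and values are hypothetical:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// forProvider is authoritative; its values must survive the merge.
	params := map[string]any{
		"function_id": "from-forProvider",
	}
	// initProvider fills only what forProvider does not set.
	initParams := map[string]any{
		"function_id": "from-initProvider", // must not win over params
		"policy":      []any{map[string]any{"tag": "$latest"}},
	}

	// WithSliceDeepCopy deep-merges slice elements, but it also flips
	// Overwrite to true as a side effect, so the extra option resets it
	// to keep the forProvider values authoritative.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(params["function_id"]) // from-forProvider
	fmt.Println(params["policy"])      // [map[tag:$latest]]
}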
package v1alpha1 @@ -9,97 +7,82 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type PolicyInitParameters struct { + // Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` -// Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` - -// max number of instances in one zone for Yandex.Cloud Function with tag -ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` + // max number of instances in one zone for Yandex.Cloud Function with tag + ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` -// max number of requests in one zone for Yandex.Cloud Function with tag -ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` + // max number of requests in one zone for Yandex.Cloud Function with tag + ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` } - type PolicyObservation struct { + // Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` -// Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` - -// max number of instances in one zone for Yandex.Cloud Function with tag -ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` + // max number of instances in one zone for Yandex.Cloud Function with tag + ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` -// max number of requests in one zone for Yandex.Cloud Function with tag -ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` + // max number of requests in one zone for Yandex.Cloud Function with tag + ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` } - type PolicyParameters struct { + // Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy + // +kubebuilder:validation:Optional + Tag *string `json:"tag" tf:"tag,omitempty"` -// Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy -// +kubebuilder:validation:Optional -Tag *string `json:"tag" tf:"tag,omitempty"` - -// max number of instances in one zone for Yandex.Cloud Function with tag -// +kubebuilder:validation:Optional -ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` + // max number of instances in one zone for Yandex.Cloud Function with tag + // +kubebuilder:validation:Optional + ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` -// max number of requests in one zone for Yandex.Cloud Function with tag -// +kubebuilder:validation:Optional -ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` + // max number of requests in one zone for Yandex.Cloud Function with tag + // +kubebuilder:validation:Optional + ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` } - type ScalingPolicyInitParameters struct { + // Yandex Cloud Function id used to define function + FunctionID *string 
`json:"functionId,omitempty" tf:"function_id,omitempty"` -// Yandex Cloud Function id used to define function -FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` - -// list definition for Yandex Cloud Function scaling policies -Policy []PolicyInitParameters `json:"policy,omitempty" tf:"policy,omitempty"` + // list definition for Yandex Cloud Function scaling policies + Policy []PolicyInitParameters `json:"policy,omitempty" tf:"policy,omitempty"` } - type ScalingPolicyObservation struct { + // Yandex Cloud Function id used to define function + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` -// Yandex Cloud Function id used to define function -FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// list definition for Yandex Cloud Function scaling policies -Policy []PolicyObservation `json:"policy,omitempty" tf:"policy,omitempty"` + // list definition for Yandex Cloud Function scaling policies + Policy []PolicyObservation `json:"policy,omitempty" tf:"policy,omitempty"` } - type ScalingPolicyParameters struct { + // Yandex Cloud Function id used to define function + // +kubebuilder:validation:Optional + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` -// Yandex Cloud Function id used to define function -// +kubebuilder:validation:Optional -FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` - -// list definition for Yandex Cloud Function scaling policies -// +kubebuilder:validation:Optional -Policy []PolicyParameters `json:"policy,omitempty" tf:"policy,omitempty"` + // list definition for Yandex Cloud Function scaling policies + // +kubebuilder:validation:Optional + Policy []PolicyParameters `json:"policy,omitempty" tf:"policy,omitempty"` } // ScalingPolicySpec defines the desired state of ScalingPolicy type ScalingPolicySpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ScalingPolicyParameters `json:"forProvider"` + ForProvider ScalingPolicyParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -110,20 +93,19 @@ type ScalingPolicySpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider ScalingPolicyInitParameters `json:"initProvider,omitempty"` + InitProvider ScalingPolicyInitParameters `json:"initProvider,omitempty"` } // ScalingPolicyStatus defines the observed state of ScalingPolicy. type ScalingPolicyStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ScalingPolicyObservation `json:"atProvider,omitempty"` + AtProvider ScalingPolicyObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // ScalingPolicy is the Schema for the ScalingPolicys API. Allows management of a Yandex Cloud Function Scaling Policy. 
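The ScalingPolicy types above map onto the yandex_function_scaling_policy Terraform schema: forProvider carries the desired state, initProvider carries values merged in only at creation, and atProvider mirrors the observed state. A hedged sketch of populating the generated type from Go; the provider module path and the function ID are assumptions, and ptr.To comes from k8s.io/utils/ptr:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	v1alpha1 "github.com/yandex-cloud/crossplane-provider-yandex/apis/function/v1alpha1" // assumed import path
)

func main() {
	sp := &v1alpha1.ScalingPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-scaling-policy"},
		Spec: v1alpha1.ScalingPolicySpec{
			ForProvider: v1alpha1.ScalingPolicyParameters{
				// Required by the XValidation rule below unless it is
				// supplied via spec.initProvider instead.
				FunctionID: ptr.To("hypothetical-function-id"),
				Policy: []v1alpha1.PolicyParameters{{
					Tag:                ptr.To("$latest"),
					ZoneInstancesLimit: ptr.To(3.0),
					ZoneRequestsLimit:  ptr.To(100.0),
				}},
			},
		},
	}
	fmt.Println(sp.Name)
}

Note that the CEL rule below fires only when the management policies include '*', 'Create', or 'Update', which is why an observe-only ScalingPolicy may omit functionId entirely.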
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -133,9 +115,9 @@ type ScalingPolicyStatus struct { type ScalingPolicy struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionId) || (has(self.initProvider) && has(self.initProvider.functionId))",message="spec.forProvider.functionId is a required parameter" - Spec ScalingPolicySpec `json:"spec"` - Status ScalingPolicyStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionId) || (has(self.initProvider) && has(self.initProvider.functionId))",message="spec.forProvider.functionId is a required parameter" + Spec ScalingPolicySpec `json:"spec"` + Status ScalingPolicyStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/function/v1alpha1/zz_trigger_terraformed.go b/apis/function/v1alpha1/zz_trigger_terraformed.go index f38119c..bcb71aa 100755 --- a/apis/function/v1alpha1/zz_trigger_terraformed.go +++ b/apis/function/v1alpha1/zz_trigger_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Trigger func (mg *Trigger) GetTerraformResourceType() string { - return "yandex_function_trigger" + return "yandex_function_trigger" } // GetConnectionDetailsMapping for this Trigger func (tr *Trigger) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Trigger func (tr *Trigger) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Trigger func (tr *Trigger) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Trigger func (tr *Trigger) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Trigger func (tr *Trigger) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) } // SetParameters for this Trigger func (tr *Trigger) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Trigger func (tr *Trigger) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this Trigger func (tr *Trigger) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Trigger using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Trigger) LateInitialize(attrs []byte) (bool, error) { - params := &TriggerParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &TriggerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Trigger) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/function/v1alpha1/zz_trigger_types.go b/apis/function/v1alpha1/zz_trigger_types.go index 60034d2..a2193a8 100755 --- a/apis/function/v1alpha1/zz_trigger_types.go +++ b/apis/function/v1alpha1/zz_trigger_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,1003 +7,920 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ContainerInitParameters struct { + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger + Path *string `json:"path,omitempty" tf:"path,omitempty"` -// Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` -// Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` -// Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` - -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type ContainerObservation struct { + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger + Path *string `json:"path,omitempty" tf:"path,omitempty"` -// Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" 
tf:"retry_attempts,omitempty"` -// Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type ContainerParameters struct { + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` -// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ID *string `json:"id" tf:"id,omitempty"` - -// Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -Path *string `json:"path,omitempty" tf:"path,omitempty"` + // Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` -// Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` -// Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type ContainerRegistryInitParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Boolean flag for setting create image event for Yandex Cloud Functions Trigger -CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` + // Boolean flag for setting create image event for Yandex Cloud Functions Trigger + CreateImage *bool `json:"createImage,omitempty" 
tf:"create_image,omitempty"` -// Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger -CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` + // Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger + CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` -// Boolean flag for setting delete image event for Yandex Cloud Functions Trigger -DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` + // Boolean flag for setting delete image event for Yandex Cloud Functions Trigger + DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` -// Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger -DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` + // Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger + DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` -// Image name filter setting for Yandex Cloud Functions Trigger -ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + // Image name filter setting for Yandex Cloud Functions Trigger + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` -// IoT Registry ID for Yandex Cloud Functions Trigger -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` } - type ContainerRegistryObservation struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Boolean flag for setting create image event for Yandex Cloud Functions Trigger + CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` -// Boolean flag for setting create image event for Yandex Cloud Functions Trigger -CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` + // Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger + CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` -// Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger -CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` + // Boolean flag for setting delete image event for Yandex Cloud Functions Trigger + DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` -// Boolean flag for setting delete image event for Yandex Cloud Functions Trigger -DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` + // Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger + DeleteImageTag *bool 
`json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` -// Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger -DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` + // Image name filter setting for Yandex Cloud Functions Trigger + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` -// Image name filter setting for Yandex Cloud Functions Trigger -ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// IoT Registry ID for Yandex Cloud Functions Trigger -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` - -// Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` } - type ContainerRegistryParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Boolean flag for setting create image event for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` + // Boolean flag for setting create image event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` -// Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` + // Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` -// Boolean flag for setting delete image event for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` + // Boolean flag for setting delete image event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` -// Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` + // Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` -// Image name filter setting for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ImageName *string 
`json:"imageName,omitempty" tf:"image_name,omitempty"` + // Image name filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` -// IoT Registry ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` + // IoT Registry ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` -// Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` } - type DataStreamsInitParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Stream database for Yandex Cloud Functions Trigger -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Stream database for Yandex Cloud Functions Trigger + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Stream name for Yandex Cloud Functions Trigger -StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + // Stream name for Yandex Cloud Functions Trigger + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` } - type DataStreamsObservation struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Stream database for Yandex Cloud Functions Trigger + Database *string `json:"database,omitempty" tf:"database,omitempty"` -// Stream database for Yandex Cloud Functions Trigger -Database *string `json:"database,omitempty" tf:"database,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` - -// Stream name for Yandex Cloud Functions Trigger -StreamName 
*string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + // Stream name for Yandex Cloud Functions Trigger + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` } - type DataStreamsParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Stream database for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Database *string `json:"database" tf:"database,omitempty"` -// Stream database for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -Database *string `json:"database" tf:"database,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` - -// Stream name for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -StreamName *string `json:"streamName" tf:"stream_name,omitempty"` + // Stream name for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName" tf:"stream_name,omitempty"` } - type DlqInitParameters struct { + // Message Queue ID for Yandex Cloud Functions Trigger + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` -// Message Queue ID for Yandex Cloud Functions Trigger -QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` - -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type DlqObservation struct { + // Message Queue ID for Yandex Cloud Functions Trigger + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` -// Message Queue ID for Yandex Cloud Functions Trigger -QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` - -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type DlqParameters struct { + // Message Queue ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + QueueID *string `json:"queueId" tf:"queue_id,omitempty"` -// Message Queue ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -QueueID *string `json:"queueId" tf:"queue_id,omitempty"` - -// Message Queue Service Account ID for 
Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` } - type FunctionInitParameters struct { + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` -// Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` } - type FunctionObservation struct { + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` -// Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger -RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` } - type FunctionParameters struct { + // Yandex.Cloud Function ID for Yandex Cloud Functions 
Trigger + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` -// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ID *string `json:"id" tf:"id,omitempty"` - -// Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` -// Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` -// Message Queue Service Account ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` } - type IotInitParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // IoT Device ID for Yandex Cloud Functions Trigger + DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` -// IoT Device ID for Yandex Cloud Functions Trigger -DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// IoT Registry ID for Yandex Cloud Functions Trigger -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` - -// IoT Topic for Yandex Cloud Functions Trigger -Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` + // IoT Topic for Yandex Cloud Functions Trigger + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` } - type IotObservation struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" 
tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // IoT Device ID for Yandex Cloud Functions Trigger + DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` -// IoT Device ID for Yandex Cloud Functions Trigger -DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` -// IoT Registry ID for Yandex Cloud Functions Trigger -RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` - -// IoT Topic for Yandex Cloud Functions Trigger -Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` + // IoT Topic for Yandex Cloud Functions Trigger + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` } - type IotParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// IoT Device ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` + // IoT Device ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` -// IoT Registry ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` + // IoT Registry ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` -// IoT Topic for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` + // IoT Topic for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` } - type LogGroupInitParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// +listType=set -LogGroupIds []*string `json:"logGroupIds,omitempty" tf:"log_group_ids,omitempty"` + // +listType=set + LogGroupIds []*string `json:"logGroupIds,omitempty" tf:"log_group_ids,omitempty"` } - type LogGroupObservation struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex 
Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// +listType=set -LogGroupIds []*string `json:"logGroupIds,omitempty" tf:"log_group_ids,omitempty"` + // +listType=set + LogGroupIds []*string `json:"logGroupIds,omitempty" tf:"log_group_ids,omitempty"` } - type LogGroupParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` - -// +kubebuilder:validation:Optional -// +listType=set -LogGroupIds []*string `json:"logGroupIds" tf:"log_group_ids,omitempty"` + // +kubebuilder:validation:Optional + // +listType=set + LogGroupIds []*string `json:"logGroupIds" tf:"log_group_ids,omitempty"` } - type LoggingInitParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Logging group ID for Yandex Cloud Functions Trigger -GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + // Logging group ID for Yandex Cloud Functions Trigger + GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` -// Logging level filter setting for Yandex Cloud Functions Trigger -// +listType=set -Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + // Logging level filter setting for Yandex Cloud Functions Trigger + // +listType=set + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` -// Resource ID filter setting for Yandex Cloud Functions Trigger -// +listType=set -ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + // Resource ID filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` -// Resource type filter setting for Yandex Cloud Functions Trigger -// +listType=set -ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + // Resource type filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` -// Logging stream name filter setting for Yandex Cloud Functions Trigger -// +listType=set -StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` + // Logging stream 
name filter setting for Yandex Cloud Functions Trigger + // +listType=set + StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` } - type LoggingObservation struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` - -// Batch Size for Yandex Cloud Functions Trigger -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Logging group ID for Yandex Cloud Functions Trigger -GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + // Logging group ID for Yandex Cloud Functions Trigger + GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` -// Logging level filter setting for Yandex Cloud Functions Trigger -// +listType=set -Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + // Logging level filter setting for Yandex Cloud Functions Trigger + // +listType=set + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` -// Resource ID filter setting for Yandex Cloud Functions Trigger -// +listType=set -ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + // Resource ID filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` -// Resource type filter setting for Yandex Cloud Functions Trigger -// +listType=set -ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + // Resource type filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` -// Logging stream name filter setting for Yandex Cloud Functions Trigger -// +listType=set -StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` + // Logging stream name filter setting for Yandex Cloud Functions Trigger + // +listType=set + StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` } - type LoggingParameters struct { + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` -// Batch Duration in seconds for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` -// Batch Size for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + // Logging group ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + GroupID *string `json:"groupId" tf:"group_id,omitempty"` -// Logging group ID for Yandex Cloud Functions Trigger -// +kubebuilder:validation:Optional -GroupID *string `json:"groupId" tf:"group_id,omitempty"` + // Logging level filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + // +listType=set + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` -// Logging 
level filter setting for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-// +listType=set
-Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"`
+	// Resource ID filter setting for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"`
-// Resource ID filter setting for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-// +listType=set
-ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"`
+	// Resource type filter setting for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"`
-// Resource type filter setting for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-// +listType=set
-ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"`
-
-// Logging stream name filter setting for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-// +listType=set
-StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"`
+	// Logging stream name filter setting for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"`
 }
-
 type MailInitParameters struct {
+	// Object Storage Bucket ID for Yandex Cloud Functions Trigger
+	AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"`
-// Object Storage Bucket ID for Yandex Cloud Functions Trigger
-AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"`
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Batch Size for Yandex Cloud Functions Trigger
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-
-// Message Queue Service Account ID for Yandex Cloud Functions Trigger
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// Message Queue Service Account ID for Yandex Cloud Functions Trigger
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 }
-
 type MailObservation struct {
+	// Object Storage Bucket ID for Yandex Cloud Functions Trigger
+	AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"`
-// Object Storage Bucket ID for Yandex Cloud Functions Trigger
-AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"`
-
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-// Batch Size for Yandex Cloud Functions Trigger
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Message Queue Service Account ID for Yandex Cloud Functions Trigger
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// Message Queue Service Account ID for Yandex Cloud Functions Trigger
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 }
-
 type MailParameters struct {
+	// Object Storage Bucket ID for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"`
-// Object Storage Bucket ID for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"`
-
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"`
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"`
-// Batch Size for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Message Queue Service Account ID for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// Message Queue Service Account ID for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
 }
-
 type MessageQueueInitParameters struct {
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Batch Size for Yandex Cloud Functions Trigger
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Message Queue ID for Yandex Cloud Functions Trigger
+	QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"`
-// Message Queue ID for Yandex Cloud Functions Trigger
-QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"`
+	// Message Queue Service Account ID for Yandex Cloud Functions Trigger
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// Message Queue Service Account ID for Yandex Cloud Functions Trigger
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-
-// Visibility timeout for Yandex Cloud Functions Trigger
-VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"`
+	// Visibility timeout for Yandex Cloud Functions Trigger
+	VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"`
 }
-
 type MessageQueueObservation struct {
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-
-// Batch Size for Yandex Cloud Functions Trigger
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Message Queue ID for Yandex Cloud Functions Trigger
-QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"`
+	// Message Queue ID for Yandex Cloud Functions Trigger
+	QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"`
-// Message Queue Service Account ID for Yandex Cloud Functions Trigger
-ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+	// Message Queue Service Account ID for Yandex Cloud Functions Trigger
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
-// Visibility timeout for Yandex Cloud Functions Trigger
-VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"`
+	// Visibility timeout for Yandex Cloud Functions Trigger
+	VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"`
 }
-
 type MessageQueueParameters struct {
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"`
-
-// Batch Size for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Message Queue ID for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-QueueID *string `json:"queueId" tf:"queue_id,omitempty"`
+	// Message Queue ID for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	QueueID *string `json:"queueId" tf:"queue_id,omitempty"`
-// Message Queue Service Account ID for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"`
+	// Message Queue Service Account ID for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"`
-// Visibility timeout for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"`
+	// Visibility timeout for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"`
 }
-
 type ObjectStorageInitParameters struct {
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-
-// Batch Size for Yandex Cloud Functions Trigger
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Object Storage Bucket ID for Yandex Cloud Functions Trigger
-BucketID *string `json:"bucketId,omitempty" tf:"bucket_id,omitempty"`
+	// Object Storage Bucket ID for Yandex Cloud Functions Trigger
+	BucketID *string `json:"bucketId,omitempty" tf:"bucket_id,omitempty"`
-// Boolean flag for setting create event for Yandex Cloud Functions Trigger
-Create *bool `json:"create,omitempty" tf:"create,omitempty"`
+	// Boolean flag for setting create event for Yandex Cloud Functions Trigger
+	Create *bool `json:"create,omitempty" tf:"create,omitempty"`
-// Boolean flag for setting delete event for Yandex Cloud Functions Trigger
-Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"`
+	// Boolean flag for setting delete event for Yandex Cloud Functions Trigger
+	Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"`
-// Prefix for Object Storage for Yandex Cloud Functions Trigger
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Prefix for Object Storage for Yandex Cloud Functions Trigger
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
-// Suffix for Object Storage for Yandex Cloud Functions Trigger
-Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
+	// Suffix for Object Storage for Yandex Cloud Functions Trigger
+	Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
-// Boolean flag for setting update event for Yandex Cloud Functions Trigger
-Update *bool `json:"update,omitempty" tf:"update,omitempty"`
+	// Boolean flag for setting update event for Yandex Cloud Functions Trigger
+	Update *bool `json:"update,omitempty" tf:"update,omitempty"`
 }
-
 type ObjectStorageObservation struct {
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Batch Size for Yandex Cloud Functions Trigger
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Object Storage Bucket ID for Yandex Cloud Functions Trigger
+	BucketID *string `json:"bucketId,omitempty" tf:"bucket_id,omitempty"`
-// Object Storage Bucket ID for Yandex Cloud Functions Trigger
-BucketID *string `json:"bucketId,omitempty" tf:"bucket_id,omitempty"`
+	// Boolean flag for setting create event for Yandex Cloud Functions Trigger
+	Create *bool `json:"create,omitempty" tf:"create,omitempty"`
-// Boolean flag for setting create event for Yandex Cloud Functions Trigger
-Create *bool `json:"create,omitempty" tf:"create,omitempty"`
+	// Boolean flag for setting delete event for Yandex Cloud Functions Trigger
+	Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"`
-// Boolean flag for setting delete event for Yandex Cloud Functions Trigger
-Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"`
+	// Prefix for Object Storage for Yandex Cloud Functions Trigger
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
-// Prefix for Object Storage for Yandex Cloud Functions Trigger
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Suffix for Object Storage for Yandex Cloud Functions Trigger
+	Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
-// Suffix for Object Storage for Yandex Cloud Functions Trigger
-Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
-
-// Boolean flag for setting update event for Yandex Cloud Functions Trigger
-Update *bool `json:"update,omitempty" tf:"update,omitempty"`
+	// Boolean flag for setting update event for Yandex Cloud Functions Trigger
+	Update *bool `json:"update,omitempty" tf:"update,omitempty"`
 }
-
 type ObjectStorageParameters struct {
+	// Batch Duration in seconds for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"`
-// Batch Duration in seconds for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"`
-
-// Batch Size for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
+	// Batch Size for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"`
-// Object Storage Bucket ID for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-BucketID *string `json:"bucketId" tf:"bucket_id,omitempty"`
+	// Object Storage Bucket ID for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	BucketID *string `json:"bucketId" tf:"bucket_id,omitempty"`
-// Boolean flag for setting create event for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Create *bool `json:"create,omitempty" tf:"create,omitempty"`
+	// Boolean flag for setting create event for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Create *bool `json:"create,omitempty" tf:"create,omitempty"`
-// Boolean flag for setting delete event for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"`
+	// Boolean flag for setting delete event for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"`
-// Prefix for Object Storage for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+	// Prefix for Object Storage for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
-// Suffix for Object Storage for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
+	// Suffix for Object Storage for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
-// Boolean flag for setting update event for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Update *bool `json:"update,omitempty" tf:"update,omitempty"`
+	// Boolean flag for setting update event for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Update *bool `json:"update,omitempty" tf:"update,omitempty"`
 }
-
 type TimerInitParameters struct {
+	// Cron expression for timer for Yandex Cloud Functions Trigger
+	CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"`
-// Cron expression for timer for Yandex Cloud Functions Trigger
-CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"`
-
-// Payload to be passed to function
-Payload *string `json:"payload,omitempty" tf:"payload,omitempty"`
+	// Payload to be passed to function
+	Payload *string `json:"payload,omitempty" tf:"payload,omitempty"`
 }
-
 type TimerObservation struct {
+	// Cron expression for timer for Yandex Cloud Functions Trigger
+	CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"`
-// Cron expression for timer for Yandex Cloud Functions Trigger
-CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"`
-
-// Payload to be passed to function
-Payload *string `json:"payload,omitempty" tf:"payload,omitempty"`
+	// Payload to be passed to function
+	Payload *string `json:"payload,omitempty" tf:"payload,omitempty"`
 }
-
 type TimerParameters struct {
+	// Cron expression for timer for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	CronExpression *string `json:"cronExpression" tf:"cron_expression,omitempty"`
-// Cron expression for timer for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-CronExpression *string `json:"cronExpression" tf:"cron_expression,omitempty"`
-
-// Payload to be passed to function
-// +kubebuilder:validation:Optional
-Payload *string `json:"payload,omitempty" tf:"payload,omitempty"`
+	// Payload to be passed to function
+	// +kubebuilder:validation:Optional
+	Payload *string `json:"payload,omitempty" tf:"payload,omitempty"`
 }
-
 type TriggerInitParameters struct {
+	Container []ContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"`
+	// Container Registry settings definition for Yandex Cloud Functions Trigger, if present
+	ContainerRegistry []ContainerRegistryInitParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"`
-Container []ContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"`
-
-// Container Registry settings definition for Yandex Cloud Functions Trigger, if present
-ContainerRegistry []ContainerRegistryInitParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"`
+	// Data Streams settings definition for Yandex Cloud Functions Trigger, if present
+	DataStreams []DataStreamsInitParameters `json:"dataStreams,omitempty" tf:"data_streams,omitempty"`
-// Data Streams settings definition for Yandex Cloud Functions Trigger, if present
-DataStreams []DataStreamsInitParameters `json:"dataStreams,omitempty" tf:"data_streams,omitempty"`
+	// Description of the Yandex Cloud Functions Trigger
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the Yandex Cloud Functions Trigger
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Dead Letter Queue settings definition for Yandex Cloud Functions Trigger
+	Dlq []DlqInitParameters `json:"dlq,omitempty" tf:"dlq,omitempty"`
-// Dead Letter Queue settings definition for Yandex Cloud Functions Trigger
-Dlq []DlqInitParameters `json:"dlq,omitempty" tf:"dlq,omitempty"`
+	// Folder ID for the Yandex Cloud Functions Trigger
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Folder ID for the Yandex Cloud Functions Trigger
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger
+	Function []FunctionInitParameters `json:"function,omitempty" tf:"function,omitempty"`
-// Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger
-Function []FunctionInitParameters `json:"function,omitempty" tf:"function,omitempty"`
+	// IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined.
+	Iot []IotInitParameters `json:"iot,omitempty" tf:"iot,omitempty"`
-// IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined.
-Iot []IotInitParameters `json:"iot,omitempty" tf:"iot,omitempty"`
+	// A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	LogGroup []LogGroupInitParameters `json:"logGroup,omitempty" tf:"log_group,omitempty"`
-LogGroup []LogGroupInitParameters `json:"logGroup,omitempty" tf:"log_group,omitempty"`
+	// Logging settings definition for Yandex Cloud Functions Trigger, if present
+	Logging []LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"`
-// Logging settings definition for Yandex Cloud Functions Trigger, if present
-Logging []LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"`
+	// Logging settings definition for Yandex Cloud Functions Trigger, if present
+	Mail []MailInitParameters `json:"mail,omitempty" tf:"mail,omitempty"`
-// Logging settings definition for Yandex Cloud Functions Trigger, if present
-Mail []MailInitParameters `json:"mail,omitempty" tf:"mail,omitempty"`
+	// Message Queue settings definition for Yandex Cloud Functions Trigger, if present
+	MessageQueue []MessageQueueInitParameters `json:"messageQueue,omitempty" tf:"message_queue,omitempty"`
-// Message Queue settings definition for Yandex Cloud Functions Trigger, if present
-MessageQueue []MessageQueueInitParameters `json:"messageQueue,omitempty" tf:"message_queue,omitempty"`
+	// Yandex Cloud Functions Trigger name used to define trigger
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Yandex Cloud Functions Trigger name used to define trigger
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Object Storage settings definition for Yandex Cloud Functions Trigger, if present
+	ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
-// Object Storage settings definition for Yandex Cloud Functions Trigger, if present
-ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
-
-// Timer settings definition for Yandex Cloud Functions Trigger, if present
-Timer []TimerInitParameters `json:"timer,omitempty" tf:"timer,omitempty"`
+	// Timer settings definition for Yandex Cloud Functions Trigger, if present
+	Timer []TimerInitParameters `json:"timer,omitempty" tf:"timer,omitempty"`
 }
-
 type TriggerObservation struct {
+	Container []ContainerObservation `json:"container,omitempty" tf:"container,omitempty"`
+	// Container Registry settings definition for Yandex Cloud Functions Trigger, if present
+	ContainerRegistry []ContainerRegistryObservation `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"`
-Container []ContainerObservation `json:"container,omitempty" tf:"container,omitempty"`
+	// Creation timestamp of the Yandex Cloud Functions Trigger
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-// Container Registry settings definition for Yandex Cloud Functions Trigger, if present
-ContainerRegistry []ContainerRegistryObservation `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"`
+	// Data Streams settings definition for Yandex Cloud Functions Trigger, if present
+	DataStreams []DataStreamsObservation `json:"dataStreams,omitempty" tf:"data_streams,omitempty"`
-// Creation timestamp of the Yandex Cloud Functions Trigger
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+	// Description of the Yandex Cloud Functions Trigger
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Data Streams settings definition for Yandex Cloud Functions Trigger, if present
-DataStreams []DataStreamsObservation `json:"dataStreams,omitempty" tf:"data_streams,omitempty"`
+	// Dead Letter Queue settings definition for Yandex Cloud Functions Trigger
+	Dlq []DlqObservation `json:"dlq,omitempty" tf:"dlq,omitempty"`
-// Description of the Yandex Cloud Functions Trigger
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Folder ID for the Yandex Cloud Functions Trigger
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Dead Letter Queue settings definition for Yandex Cloud Functions Trigger
-Dlq []DlqObservation `json:"dlq,omitempty" tf:"dlq,omitempty"`
+	// Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger
+	Function []FunctionObservation `json:"function,omitempty" tf:"function,omitempty"`
-// Folder ID for the Yandex Cloud Functions Trigger
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-// Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger
-Function []FunctionObservation `json:"function,omitempty" tf:"function,omitempty"`
+	// IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined.
+	Iot []IotObservation `json:"iot,omitempty" tf:"iot,omitempty"`
-// Yandex.Cloud Function ID for Yandex Cloud Functions Trigger
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined.
-Iot []IotObservation `json:"iot,omitempty" tf:"iot,omitempty"`
+	LogGroup []LogGroupObservation `json:"logGroup,omitempty" tf:"log_group,omitempty"`
-// A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Logging settings definition for Yandex Cloud Functions Trigger, if present
+	Logging []LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"`
-LogGroup []LogGroupObservation `json:"logGroup,omitempty" tf:"log_group,omitempty"`
+	// Logging settings definition for Yandex Cloud Functions Trigger, if present
+	Mail []MailObservation `json:"mail,omitempty" tf:"mail,omitempty"`
-// Logging settings definition for Yandex Cloud Functions Trigger, if present
-Logging []LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"`
+	// Message Queue settings definition for Yandex Cloud Functions Trigger, if present
+	MessageQueue []MessageQueueObservation `json:"messageQueue,omitempty" tf:"message_queue,omitempty"`
-// Logging settings definition for Yandex Cloud Functions Trigger, if present
-Mail []MailObservation `json:"mail,omitempty" tf:"mail,omitempty"`
+	// Yandex Cloud Functions Trigger name used to define trigger
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Message Queue settings definition for Yandex Cloud Functions Trigger, if present
-MessageQueue []MessageQueueObservation `json:"messageQueue,omitempty" tf:"message_queue,omitempty"`
+	// Object Storage settings definition for Yandex Cloud Functions Trigger, if present
+	ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
-// Yandex Cloud Functions Trigger name used to define trigger
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Object Storage settings definition for Yandex Cloud Functions Trigger, if present
-ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
-
-// Timer settings definition for Yandex Cloud Functions Trigger, if present
-Timer []TimerObservation `json:"timer,omitempty" tf:"timer,omitempty"`
+	// Timer settings definition for Yandex Cloud Functions Trigger, if present
+	Timer []TimerObservation `json:"timer,omitempty" tf:"timer,omitempty"`
 }
-
 type TriggerParameters struct {
+	// +kubebuilder:validation:Optional
+	Container []ContainerParameters `json:"container,omitempty" tf:"container,omitempty"`
-// +kubebuilder:validation:Optional
-Container []ContainerParameters `json:"container,omitempty" tf:"container,omitempty"`
+	// Container Registry settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	ContainerRegistry []ContainerRegistryParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"`
-// Container Registry settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-ContainerRegistry []ContainerRegistryParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"`
+	// Data Streams settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	DataStreams []DataStreamsParameters `json:"dataStreams,omitempty" tf:"data_streams,omitempty"`
-// Data Streams settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-DataStreams []DataStreamsParameters `json:"dataStreams,omitempty" tf:"data_streams,omitempty"`
+	// Description of the Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Dead Letter Queue settings definition for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Dlq []DlqParameters `json:"dlq,omitempty" tf:"dlq,omitempty"`
-// Dead Letter Queue settings definition for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Dlq []DlqParameters `json:"dlq,omitempty" tf:"dlq,omitempty"`
+	// Folder ID for the Yandex Cloud Functions Trigger
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Folder ID for the Yandex Cloud Functions Trigger
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	Function []FunctionParameters `json:"function,omitempty" tf:"function,omitempty"`
-// Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-Function []FunctionParameters `json:"function,omitempty" tf:"function,omitempty"`
+	// IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined.
+	// +kubebuilder:validation:Optional
+	Iot []IotParameters `json:"iot,omitempty" tf:"iot,omitempty"`
-// IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined.
-// +kubebuilder:validation:Optional
-Iot []IotParameters `json:"iot,omitempty" tf:"iot,omitempty"`
+	// A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// +kubebuilder:validation:Optional
+	LogGroup []LogGroupParameters `json:"logGroup,omitempty" tf:"log_group,omitempty"`
-// +kubebuilder:validation:Optional
-LogGroup []LogGroupParameters `json:"logGroup,omitempty" tf:"log_group,omitempty"`
+	// Logging settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	Logging []LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"`
-// Logging settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-Logging []LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"`
+	// Logging settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	Mail []MailParameters `json:"mail,omitempty" tf:"mail,omitempty"`
-// Logging settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-Mail []MailParameters `json:"mail,omitempty" tf:"mail,omitempty"`
+	// Message Queue settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	MessageQueue []MessageQueueParameters `json:"messageQueue,omitempty" tf:"message_queue,omitempty"`
-// Message Queue settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-MessageQueue []MessageQueueParameters `json:"messageQueue,omitempty" tf:"message_queue,omitempty"`
+	// Yandex Cloud Functions Trigger name used to define trigger
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Yandex Cloud Functions Trigger name used to define trigger
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Object Storage settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
-// Object Storage settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
-
-// Timer settings definition for Yandex Cloud Functions Trigger, if present
-// +kubebuilder:validation:Optional
-Timer []TimerParameters `json:"timer,omitempty" tf:"timer,omitempty"`
+	// Timer settings definition for Yandex Cloud Functions Trigger, if present
+	// +kubebuilder:validation:Optional
+	Timer []TimerParameters `json:"timer,omitempty" tf:"timer,omitempty"`
 }

 // TriggerSpec defines the desired state of Trigger
 type TriggerSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider TriggerParameters `json:"forProvider"`
+	ForProvider TriggerParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -1016,20 +931,19 @@ type TriggerSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider TriggerInitParameters `json:"initProvider,omitempty"`
+	InitProvider TriggerInitParameters `json:"initProvider,omitempty"`
 }

 // TriggerStatus defines the observed state of Trigger.
 type TriggerStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider TriggerObservation `json:"atProvider,omitempty"`
+	AtProvider TriggerObservation `json:"atProvider,omitempty"`
 }

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // Trigger is the Schema for the Triggers API. Allows management of a Yandex Cloud Functions Trigger.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
@@ -1039,9 +953,9 @@ type TriggerStatus struct {
 type Trigger struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
-	Spec TriggerSpec `json:"spec"`
-	Status TriggerStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	Spec   TriggerSpec   `json:"spec"`
+	Status TriggerStatus `json:"status,omitempty"`
 }

 // +kubebuilder:object:root=true
diff --git a/apis/iam/v1alpha1/zz_generated.conversion_hubs.go b/apis/iam/v1alpha1/zz_generated.conversion_hubs.go
new file mode 100755
index 0000000..5e61ff7
--- /dev/null
+++ b/apis/iam/v1alpha1/zz_generated.conversion_hubs.go
@@ -0,0 +1,24 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccount) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccountAPIKey) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccountIAMBinding) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccountIAMMember) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccountIAMPolicy) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccountKey) Hub() {}
+
+// Hub marks this type as a conversion hub.
+func (tr *ServiceAccountStaticAccessKey) Hub() {}
diff --git a/apis/iam/v1alpha1/zz_generated.deepcopy.go b/apis/iam/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..803b00e
--- /dev/null
+++ b/apis/iam/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1973 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutputToLockboxInitParameters) DeepCopyInto(out *OutputToLockboxInitParameters) {
+	*out = *in
+	if in.EntryForSecretKey != nil {
+		in, out := &in.EntryForSecretKey, &out.EntryForSecretKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecretID != nil {
+		in, out := &in.SecretID, &out.SecretID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputToLockboxInitParameters.
+func (in *OutputToLockboxInitParameters) DeepCopy() *OutputToLockboxInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OutputToLockboxInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutputToLockboxObservation) DeepCopyInto(out *OutputToLockboxObservation) {
+	*out = *in
+	if in.EntryForSecretKey != nil {
+		in, out := &in.EntryForSecretKey, &out.EntryForSecretKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecretID != nil {
+		in, out := &in.SecretID, &out.SecretID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputToLockboxObservation.
+func (in *OutputToLockboxObservation) DeepCopy() *OutputToLockboxObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OutputToLockboxObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutputToLockboxParameters) DeepCopyInto(out *OutputToLockboxParameters) {
+	*out = *in
+	if in.EntryForSecretKey != nil {
+		in, out := &in.EntryForSecretKey, &out.EntryForSecretKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecretID != nil {
+		in, out := &in.SecretID, &out.SecretID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputToLockboxParameters.
+func (in *OutputToLockboxParameters) DeepCopy() *OutputToLockboxParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OutputToLockboxParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount.
+func (in *ServiceAccount) DeepCopy() *ServiceAccount {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccount) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKey) DeepCopyInto(out *ServiceAccountAPIKey) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKey.
+func (in *ServiceAccountAPIKey) DeepCopy() *ServiceAccountAPIKey {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKey)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccountAPIKey) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKeyInitParameters) DeepCopyInto(out *ServiceAccountAPIKeyInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExpiresAt != nil {
+		in, out := &in.ExpiresAt, &out.ExpiresAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.OutputToLockbox != nil {
+		in, out := &in.OutputToLockbox, &out.OutputToLockbox
+		*out = make([]OutputToLockboxInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PgpKey != nil {
+		in, out := &in.PgpKey, &out.PgpKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Scope != nil {
+		in, out := &in.Scope, &out.Scope
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKeyInitParameters.
+func (in *ServiceAccountAPIKeyInitParameters) DeepCopy() *ServiceAccountAPIKeyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKeyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKeyList) DeepCopyInto(out *ServiceAccountAPIKeyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ServiceAccountAPIKey, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKeyList.
+func (in *ServiceAccountAPIKeyList) DeepCopy() *ServiceAccountAPIKeyList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKeyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccountAPIKeyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKeyObservation) DeepCopyInto(out *ServiceAccountAPIKeyObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.EncryptedSecretKey != nil {
+		in, out := &in.EncryptedSecretKey, &out.EncryptedSecretKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExpiresAt != nil {
+		in, out := &in.ExpiresAt, &out.ExpiresAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.KeyFingerprint != nil {
+		in, out := &in.KeyFingerprint, &out.KeyFingerprint
+		*out = new(string)
+		**out = **in
+	}
+	if in.OutputToLockbox != nil {
+		in, out := &in.OutputToLockbox, &out.OutputToLockbox
+		*out = make([]OutputToLockboxObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OutputToLockboxVersionID != nil {
+		in, out := &in.OutputToLockboxVersionID, &out.OutputToLockboxVersionID
+		*out = new(string)
+		**out = **in
+	}
+	if in.PgpKey != nil {
+		in, out := &in.PgpKey, &out.PgpKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Scope != nil {
+		in, out := &in.Scope, &out.Scope
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKeyObservation.
+func (in *ServiceAccountAPIKeyObservation) DeepCopy() *ServiceAccountAPIKeyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKeyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKeyParameters) DeepCopyInto(out *ServiceAccountAPIKeyParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExpiresAt != nil {
+		in, out := &in.ExpiresAt, &out.ExpiresAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.OutputToLockbox != nil {
+		in, out := &in.OutputToLockbox, &out.OutputToLockbox
+		*out = make([]OutputToLockboxParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PgpKey != nil {
+		in, out := &in.PgpKey, &out.PgpKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Scope != nil {
+		in, out := &in.Scope, &out.Scope
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKeyParameters.
+func (in *ServiceAccountAPIKeyParameters) DeepCopy() *ServiceAccountAPIKeyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKeyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKeySpec) DeepCopyInto(out *ServiceAccountAPIKeySpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKeySpec.
+func (in *ServiceAccountAPIKeySpec) DeepCopy() *ServiceAccountAPIKeySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKeySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountAPIKeyStatus) DeepCopyInto(out *ServiceAccountAPIKeyStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountAPIKeyStatus.
+func (in *ServiceAccountAPIKeyStatus) DeepCopy() *ServiceAccountAPIKeyStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountAPIKeyStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIAMBinding) DeepCopyInto(out *ServiceAccountIAMBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBinding.
+func (in *ServiceAccountIAMBinding) DeepCopy() *ServiceAccountIAMBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountIAMBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccountIAMBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIAMBindingInitParameters) DeepCopyInto(out *ServiceAccountIAMBindingInitParameters) {
+	*out = *in
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountRef != nil {
+		in, out := &in.ServiceAccountRef, &out.ServiceAccountRef
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountSelector != nil {
+		in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SleepAfter != nil {
+		in, out := &in.SleepAfter, &out.SleepAfter
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBindingInitParameters.
+func (in *ServiceAccountIAMBindingInitParameters) DeepCopy() *ServiceAccountIAMBindingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountIAMBindingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIAMBindingList) DeepCopyInto(out *ServiceAccountIAMBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ServiceAccountIAMBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBindingList.
+func (in *ServiceAccountIAMBindingList) DeepCopy() *ServiceAccountIAMBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountIAMBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccountIAMBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIAMBindingObservation) DeepCopyInto(out *ServiceAccountIAMBindingObservation) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SleepAfter != nil {
+		in, out := &in.SleepAfter, &out.SleepAfter
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBindingObservation.
+func (in *ServiceAccountIAMBindingObservation) DeepCopy() *ServiceAccountIAMBindingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountIAMBindingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIAMBindingParameters) DeepCopyInto(out *ServiceAccountIAMBindingParameters) {
+	*out = *in
+	if in.Members != nil {
+		in, out := &in.Members, &out.Members
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountRef != nil {
+		in, out := &in.ServiceAccountRef, &out.ServiceAccountRef
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceAccountSelector != nil {
+		in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SleepAfter != nil {
+		in, out := &in.SleepAfter, &out.SleepAfter
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBindingParameters.
+func (in *ServiceAccountIAMBindingParameters) DeepCopy() *ServiceAccountIAMBindingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountIAMBindingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIAMBindingSpec) DeepCopyInto(out *ServiceAccountIAMBindingSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBindingSpec.
+func (in *ServiceAccountIAMBindingSpec) DeepCopy() *ServiceAccountIAMBindingSpec { + if in == nil { + return nil + } + out := new(ServiceAccountIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMBindingStatus) DeepCopyInto(out *ServiceAccountIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMBindingStatus. +func (in *ServiceAccountIAMBindingStatus) DeepCopy() *ServiceAccountIAMBindingStatus { + if in == nil { + return nil + } + out := new(ServiceAccountIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMMember) DeepCopyInto(out *ServiceAccountIAMMember) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMember. +func (in *ServiceAccountIAMMember) DeepCopy() *ServiceAccountIAMMember { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountIAMMember) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMMemberInitParameters) DeepCopyInto(out *ServiceAccountIAMMemberInitParameters) { + *out = *in + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMemberInitParameters. 
+func (in *ServiceAccountIAMMemberInitParameters) DeepCopy() *ServiceAccountIAMMemberInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMMemberList) DeepCopyInto(out *ServiceAccountIAMMemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccountIAMMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMemberList. +func (in *ServiceAccountIAMMemberList) DeepCopy() *ServiceAccountIAMMemberList { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountIAMMemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMMemberObservation) DeepCopyInto(out *ServiceAccountIAMMemberObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMemberObservation. +func (in *ServiceAccountIAMMemberObservation) DeepCopy() *ServiceAccountIAMMemberObservation { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountIAMMemberParameters) DeepCopyInto(out *ServiceAccountIAMMemberParameters) { + *out = *in + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMemberParameters. +func (in *ServiceAccountIAMMemberParameters) DeepCopy() *ServiceAccountIAMMemberParameters { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMMemberSpec) DeepCopyInto(out *ServiceAccountIAMMemberSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMemberSpec. +func (in *ServiceAccountIAMMemberSpec) DeepCopy() *ServiceAccountIAMMemberSpec { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMemberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMMemberStatus) DeepCopyInto(out *ServiceAccountIAMMemberStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMMemberStatus. +func (in *ServiceAccountIAMMemberStatus) DeepCopy() *ServiceAccountIAMMemberStatus { + if in == nil { + return nil + } + out := new(ServiceAccountIAMMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicy) DeepCopyInto(out *ServiceAccountIAMPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicy. 
+func (in *ServiceAccountIAMPolicy) DeepCopy() *ServiceAccountIAMPolicy { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountIAMPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicyInitParameters) DeepCopyInto(out *ServiceAccountIAMPolicyInitParameters) { + *out = *in + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicyInitParameters. +func (in *ServiceAccountIAMPolicyInitParameters) DeepCopy() *ServiceAccountIAMPolicyInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicyList) DeepCopyInto(out *ServiceAccountIAMPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccountIAMPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicyList. +func (in *ServiceAccountIAMPolicyList) DeepCopy() *ServiceAccountIAMPolicyList { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountIAMPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicyObservation) DeepCopyInto(out *ServiceAccountIAMPolicyObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicyObservation. 
+func (in *ServiceAccountIAMPolicyObservation) DeepCopy() *ServiceAccountIAMPolicyObservation { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicyParameters) DeepCopyInto(out *ServiceAccountIAMPolicyParameters) { + *out = *in + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicyParameters. +func (in *ServiceAccountIAMPolicyParameters) DeepCopy() *ServiceAccountIAMPolicyParameters { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicySpec) DeepCopyInto(out *ServiceAccountIAMPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicySpec. +func (in *ServiceAccountIAMPolicySpec) DeepCopy() *ServiceAccountIAMPolicySpec { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIAMPolicyStatus) DeepCopyInto(out *ServiceAccountIAMPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIAMPolicyStatus. +func (in *ServiceAccountIAMPolicyStatus) DeepCopy() *ServiceAccountIAMPolicyStatus { + if in == nil { + return nil + } + out := new(ServiceAccountIAMPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountInitParameters) DeepCopyInto(out *ServiceAccountInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountInitParameters. +func (in *ServiceAccountInitParameters) DeepCopy() *ServiceAccountInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKey) DeepCopyInto(out *ServiceAccountKey) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKey. +func (in *ServiceAccountKey) DeepCopy() *ServiceAccountKey { + if in == nil { + return nil + } + out := new(ServiceAccountKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountKey) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeyInitParameters) DeepCopyInto(out *ServiceAccountKeyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.OutputToLockbox != nil { + in, out := &in.OutputToLockbox, &out.OutputToLockbox + *out = make([]ServiceAccountKeyOutputToLockboxInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PgpKey != nil { + in, out := &in.PgpKey, &out.PgpKey + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyInitParameters. 
+func (in *ServiceAccountKeyInitParameters) DeepCopy() *ServiceAccountKeyInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeyList) DeepCopyInto(out *ServiceAccountKeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccountKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyList. +func (in *ServiceAccountKeyList) DeepCopy() *ServiceAccountKeyList { + if in == nil { + return nil + } + out := new(ServiceAccountKeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountKeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeyObservation) DeepCopyInto(out *ServiceAccountKeyObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptedPrivateKey != nil { + in, out := &in.EncryptedPrivateKey, &out.EncryptedPrivateKey + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.KeyFingerprint != nil { + in, out := &in.KeyFingerprint, &out.KeyFingerprint + *out = new(string) + **out = **in + } + if in.OutputToLockbox != nil { + in, out := &in.OutputToLockbox, &out.OutputToLockbox + *out = make([]ServiceAccountKeyOutputToLockboxObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputToLockboxVersionID != nil { + in, out := &in.OutputToLockboxVersionID, &out.OutputToLockboxVersionID + *out = new(string) + **out = **in + } + if in.PgpKey != nil { + in, out := &in.PgpKey, &out.PgpKey + *out = new(string) + **out = **in + } + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyObservation. +func (in *ServiceAccountKeyObservation) DeepCopy() *ServiceAccountKeyObservation { + if in == nil { + return nil + } + out := new(ServiceAccountKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountKeyOutputToLockboxInitParameters) DeepCopyInto(out *ServiceAccountKeyOutputToLockboxInitParameters) { + *out = *in + if in.EntryForPrivateKey != nil { + in, out := &in.EntryForPrivateKey, &out.EntryForPrivateKey + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyOutputToLockboxInitParameters. +func (in *ServiceAccountKeyOutputToLockboxInitParameters) DeepCopy() *ServiceAccountKeyOutputToLockboxInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountKeyOutputToLockboxInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeyOutputToLockboxObservation) DeepCopyInto(out *ServiceAccountKeyOutputToLockboxObservation) { + *out = *in + if in.EntryForPrivateKey != nil { + in, out := &in.EntryForPrivateKey, &out.EntryForPrivateKey + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyOutputToLockboxObservation. +func (in *ServiceAccountKeyOutputToLockboxObservation) DeepCopy() *ServiceAccountKeyOutputToLockboxObservation { + if in == nil { + return nil + } + out := new(ServiceAccountKeyOutputToLockboxObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeyOutputToLockboxParameters) DeepCopyInto(out *ServiceAccountKeyOutputToLockboxParameters) { + *out = *in + if in.EntryForPrivateKey != nil { + in, out := &in.EntryForPrivateKey, &out.EntryForPrivateKey + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyOutputToLockboxParameters. +func (in *ServiceAccountKeyOutputToLockboxParameters) DeepCopy() *ServiceAccountKeyOutputToLockboxParameters { + if in == nil { + return nil + } + out := new(ServiceAccountKeyOutputToLockboxParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountKeyParameters) DeepCopyInto(out *ServiceAccountKeyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.OutputToLockbox != nil { + in, out := &in.OutputToLockbox, &out.OutputToLockbox + *out = make([]ServiceAccountKeyOutputToLockboxParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PgpKey != nil { + in, out := &in.PgpKey, &out.PgpKey + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyParameters. +func (in *ServiceAccountKeyParameters) DeepCopy() *ServiceAccountKeyParameters { + if in == nil { + return nil + } + out := new(ServiceAccountKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeySpec) DeepCopyInto(out *ServiceAccountKeySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeySpec. +func (in *ServiceAccountKeySpec) DeepCopy() *ServiceAccountKeySpec { + if in == nil { + return nil + } + out := new(ServiceAccountKeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountKeyStatus) DeepCopyInto(out *ServiceAccountKeyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountKeyStatus. +func (in *ServiceAccountKeyStatus) DeepCopy() *ServiceAccountKeyStatus { + if in == nil { + return nil + } + out := new(ServiceAccountKeyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. 
+func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountObservation) DeepCopyInto(out *ServiceAccountObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountObservation. +func (in *ServiceAccountObservation) DeepCopy() *ServiceAccountObservation { + if in == nil { + return nil + } + out := new(ServiceAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountParameters) DeepCopyInto(out *ServiceAccountParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountParameters. +func (in *ServiceAccountParameters) DeepCopy() *ServiceAccountParameters { + if in == nil { + return nil + } + out := new(ServiceAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSpec) DeepCopyInto(out *ServiceAccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSpec. +func (in *ServiceAccountSpec) DeepCopy() *ServiceAccountSpec { + if in == nil { + return nil + } + out := new(ServiceAccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKey) DeepCopyInto(out *ServiceAccountStaticAccessKey) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKey. 
+func (in *ServiceAccountStaticAccessKey) DeepCopy() *ServiceAccountStaticAccessKey { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountStaticAccessKey) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeyInitParameters) DeepCopyInto(out *ServiceAccountStaticAccessKeyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.OutputToLockbox != nil { + in, out := &in.OutputToLockbox, &out.OutputToLockbox + *out = make([]ServiceAccountStaticAccessKeyOutputToLockboxInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PgpKey != nil { + in, out := &in.PgpKey, &out.PgpKey + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyInitParameters. +func (in *ServiceAccountStaticAccessKeyInitParameters) DeepCopy() *ServiceAccountStaticAccessKeyInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeyList) DeepCopyInto(out *ServiceAccountStaticAccessKeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccountStaticAccessKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyList. +func (in *ServiceAccountStaticAccessKeyList) DeepCopy() *ServiceAccountStaticAccessKeyList { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountStaticAccessKeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountStaticAccessKeyObservation) DeepCopyInto(out *ServiceAccountStaticAccessKeyObservation) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptedSecretKey != nil { + in, out := &in.EncryptedSecretKey, &out.EncryptedSecretKey + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyFingerprint != nil { + in, out := &in.KeyFingerprint, &out.KeyFingerprint + *out = new(string) + **out = **in + } + if in.OutputToLockbox != nil { + in, out := &in.OutputToLockbox, &out.OutputToLockbox + *out = make([]ServiceAccountStaticAccessKeyOutputToLockboxObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputToLockboxVersionID != nil { + in, out := &in.OutputToLockboxVersionID, &out.OutputToLockboxVersionID + *out = new(string) + **out = **in + } + if in.PgpKey != nil { + in, out := &in.PgpKey, &out.PgpKey + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyObservation. +func (in *ServiceAccountStaticAccessKeyObservation) DeepCopy() *ServiceAccountStaticAccessKeyObservation { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeyOutputToLockboxInitParameters) DeepCopyInto(out *ServiceAccountStaticAccessKeyOutputToLockboxInitParameters) { + *out = *in + if in.EntryForAccessKey != nil { + in, out := &in.EntryForAccessKey, &out.EntryForAccessKey + *out = new(string) + **out = **in + } + if in.EntryForSecretKey != nil { + in, out := &in.EntryForSecretKey, &out.EntryForSecretKey + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyOutputToLockboxInitParameters. +func (in *ServiceAccountStaticAccessKeyOutputToLockboxInitParameters) DeepCopy() *ServiceAccountStaticAccessKeyOutputToLockboxInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyOutputToLockboxInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountStaticAccessKeyOutputToLockboxObservation) DeepCopyInto(out *ServiceAccountStaticAccessKeyOutputToLockboxObservation) { + *out = *in + if in.EntryForAccessKey != nil { + in, out := &in.EntryForAccessKey, &out.EntryForAccessKey + *out = new(string) + **out = **in + } + if in.EntryForSecretKey != nil { + in, out := &in.EntryForSecretKey, &out.EntryForSecretKey + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyOutputToLockboxObservation. +func (in *ServiceAccountStaticAccessKeyOutputToLockboxObservation) DeepCopy() *ServiceAccountStaticAccessKeyOutputToLockboxObservation { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyOutputToLockboxObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeyOutputToLockboxParameters) DeepCopyInto(out *ServiceAccountStaticAccessKeyOutputToLockboxParameters) { + *out = *in + if in.EntryForAccessKey != nil { + in, out := &in.EntryForAccessKey, &out.EntryForAccessKey + *out = new(string) + **out = **in + } + if in.EntryForSecretKey != nil { + in, out := &in.EntryForSecretKey, &out.EntryForSecretKey + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyOutputToLockboxParameters. +func (in *ServiceAccountStaticAccessKeyOutputToLockboxParameters) DeepCopy() *ServiceAccountStaticAccessKeyOutputToLockboxParameters { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyOutputToLockboxParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeyParameters) DeepCopyInto(out *ServiceAccountStaticAccessKeyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.OutputToLockbox != nil { + in, out := &in.OutputToLockbox, &out.OutputToLockbox + *out = make([]ServiceAccountStaticAccessKeyOutputToLockboxParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PgpKey != nil { + in, out := &in.PgpKey, &out.PgpKey + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyParameters. 
+func (in *ServiceAccountStaticAccessKeyParameters) DeepCopy() *ServiceAccountStaticAccessKeyParameters { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeySpec) DeepCopyInto(out *ServiceAccountStaticAccessKeySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeySpec. +func (in *ServiceAccountStaticAccessKeySpec) DeepCopy() *ServiceAccountStaticAccessKeySpec { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStaticAccessKeyStatus) DeepCopyInto(out *ServiceAccountStaticAccessKeyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStaticAccessKeyStatus. +func (in *ServiceAccountStaticAccessKeyStatus) DeepCopy() *ServiceAccountStaticAccessKeyStatus { + if in == nil { + return nil + } + out := new(ServiceAccountStaticAccessKeyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountStatus) DeepCopyInto(out *ServiceAccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountStatus. +func (in *ServiceAccountStatus) DeepCopy() *ServiceAccountStatus { + if in == nil { + return nil + } + out := new(ServiceAccountStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/iam/v1alpha1/zz_generated.resolvers.go b/apis/iam/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..943efb3 --- /dev/null +++ b/apis/iam/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,371 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + iam "github.com/tagesjump/provider-upjet-yc/config/iam" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this ServiceAccount. 
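+// The resolver below fills spec.forProvider.folderId (and its initProvider
+// counterpart) from either a named reference or a label selector against
+// resourcemanager Folder objects, extracting the Folder's external name.
+// A minimal sketch of the expected call site; ctx and kubeClient are
+// hypothetical names, v1 is the crossplane-runtime common v1 package, and
+// in practice the crossplane-runtime managed reconciler performs this call
+// before each reconcile:
+//
+//	sa := &ServiceAccount{}
+//	sa.Spec.ForProvider.FolderIDRef = &v1.Reference{Name: "my-folder"}
+//	if err := sa.ResolveReferences(ctx, kubeClient); err != nil {
+//		return err // e.g. the referenced Folder does not exist yet
+//	}
+//	// sa.Spec.ForProvider.FolderID now carries the resolved folder ID.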
+func (mg *ServiceAccount) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServiceAccountAPIKey. +func (mg *ServiceAccountAPIKey) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServiceAccountIAMBinding. 
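+// This resolver differs from the single-valued ones around it: the members
+// list is resolved with ResolveMultiple, so each entry can come from an
+// entry in serviceAccountRef or from serviceAccountSelector, and the
+// iam.ServiceAccountRefValue() extractor converts each resolved
+// ServiceAccount into an IAM member string (presumably of the form
+// "serviceAccount:<id>"; the exact format lives in config/iam and is not
+// shown in this diff). A hypothetical sketch:
+//
+//	b := &ServiceAccountIAMBinding{}
+//	b.Spec.ForProvider.ServiceAccountRef = []v1.Reference{{Name: "sa-a"}}
+//	_ = b.ResolveReferences(ctx, kubeClient)
+//	// b.Spec.ForProvider.Members now holds one member string per reference.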
+func (mg *ServiceAccountIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServiceAccountIAMMember. 
+func (mg *ServiceAccountIAMMember) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Member), + Extract: iam.ServiceAccountRefValue(), + Reference: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Member") + } + mg.Spec.ForProvider.Member = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Member), + Extract: iam.ServiceAccountRefValue(), + Reference: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Member") + } + mg.Spec.InitProvider.Member = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServiceAccountIAMPolicy. 
+func (mg *ServiceAccountIAMPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServiceAccountKey. +func (mg *ServiceAccountKey) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServiceAccountStaticAccessKey. 
+func (mg *ServiceAccountStaticAccessKey) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &ServiceAccountList{}, + Managed: &ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/iam/v1alpha1/zz_groupversion_info.go b/apis/iam/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..ca35dc4 --- /dev/null +++ b/apis/iam/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=iam.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "iam.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/iam/v1alpha1/zz_serviceaccount_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccount_terraformed.go new file mode 100755 index 0000000..79990e6 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccount_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
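+
+// The methods in this file satisfy upjet's resource.Terraformed interface,
+// translating between the ServiceAccount custom resource and the Terraform
+// JSON state of yandex_iam_service_account. A minimal round-trip sketch
+// (the id value is hypothetical, error handling elided):
+//
+//	var sa ServiceAccount
+//	params, _ := sa.GetParameters()                        // spec.forProvider as map[string]any
+//	_ = sa.SetObservation(map[string]any{"id": "aje-123"}) // writes status.atProvider
+//	id := sa.GetID()                                       // "aje-123"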
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceAccount +func (mg *ServiceAccount) GetTerraformResourceType() string { + return "yandex_iam_service_account" +} + +// GetConnectionDetailsMapping for this ServiceAccount +func (tr *ServiceAccount) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServiceAccount +func (tr *ServiceAccount) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceAccount +func (tr *ServiceAccount) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceAccount +func (tr *ServiceAccount) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceAccount +func (tr *ServiceAccount) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceAccount +func (tr *ServiceAccount) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceAccount +func (tr *ServiceAccount) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceAccount +func (tr *ServiceAccount) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceAccount using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ServiceAccount) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceAccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAccount) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iam/v1alpha1/zz_serviceaccount_types.go b/apis/iam/v1alpha1/zz_serviceaccount_types.go new file mode 100755 index 0000000..e982261 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccount_types.go @@ -0,0 +1,121 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServiceAccountInitParameters struct { + + // Description of the service account. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the service account will be created in. Defaults to the provider folder configuration. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` +} + +type ServiceAccountObservation struct { + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the service account. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the service account will be created in. Defaults to the provider folder configuration. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type ServiceAccountParameters struct { + + // Description of the service account. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the service account will be created in. Defaults to the provider folder configuration. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` +} + +// ServiceAccountSpec defines the desired state of ServiceAccount +type ServiceAccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountStatus defines the observed state of ServiceAccount. +type ServiceAccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccount is the Schema for the ServiceAccounts API. Allows management of a Yandex.Cloud IAM service account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccount struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServiceAccountSpec `json:"spec"` + Status ServiceAccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountList contains a list of ServiceAccounts +type ServiceAccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccount `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccount_Kind = "ServiceAccount" + ServiceAccount_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccount_Kind}.String() + ServiceAccount_KindAPIVersion = ServiceAccount_Kind + "." + CRDGroupVersion.String() + ServiceAccount_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccount_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccount{}, &ServiceAccountList{}) +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountapikey_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccountapikey_terraformed.go new file mode 100755 index 0000000..299a495 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountapikey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
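The `*_terraformed.go` files that follow all repeat the `GetMergedParameters` pattern introduced for ServiceAccount above: `spec.initProvider` may fill gaps in `spec.forProvider` but never overwrite it, because the mergo option callback forces `Overwrite` back to `false` after `WithSliceDeepCopy` enables it. A toy sketch of that merge behaviour with plain maps standing in for the generated structs (the folder ID is a made-up value):

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// forProvider wins wherever it already has a value ...
	params := map[string]any{"description": "from forProvider"}
	// ... and initProvider only contributes keys that are missing.
	initParams := map[string]any{
		"description": "from initProvider", // ignored: key already set
		"folder_id":   "b1gexample",        // hypothetical ID, merged in
	}
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["description"]) // from forProvider
	fmt.Println(params["folder_id"])   // b1gexample
}
```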
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceAccountAPIKey +func (mg *ServiceAccountAPIKey) GetTerraformResourceType() string { + return "yandex_iam_service_account_api_key" +} + +// GetConnectionDetailsMapping for this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"secret_key": "status.atProvider.secretKey"} +} + +// GetObservation of this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceAccountAPIKey +func (tr *ServiceAccountAPIKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceAccountAPIKey using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ServiceAccountAPIKey) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceAccountAPIKeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAccountAPIKey) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountapikey_types.go b/apis/iam/v1alpha1/zz_serviceaccountapikey_types.go new file mode 100755 index 0000000..f6ed74f --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountapikey_types.go @@ -0,0 +1,208 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OutputToLockboxInitParameters struct { + + // Entry where to store the value of secret_key. + // entry that will store the value of secret_key + EntryForSecretKey *string `json:"entryForSecretKey,omitempty" tf:"entry_for_secret_key,omitempty"` + + // ID of the Lockbox secret where to store the sensitive values. + // secret where to add the version with the sensitive values + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` +} + +type OutputToLockboxObservation struct { + + // Entry where to store the value of secret_key. + // entry that will store the value of secret_key + EntryForSecretKey *string `json:"entryForSecretKey,omitempty" tf:"entry_for_secret_key,omitempty"` + + // ID of the Lockbox secret where to store the sensitive values. + // secret where to add the version with the sensitive values + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` +} + +type OutputToLockboxParameters struct { + + // Entry where to store the value of secret_key. + // entry that will store the value of secret_key + // +kubebuilder:validation:Optional + EntryForSecretKey *string `json:"entryForSecretKey" tf:"entry_for_secret_key,omitempty"` + + // ID of the Lockbox secret where to store the sensitive values. + // secret where to add the version with the sensitive values + // +kubebuilder:validation:Optional + SecretID *string `json:"secretId" tf:"secret_id,omitempty"` +} + +type ServiceAccountAPIKeyInitParameters struct { + + // The description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The key will no longer be valid after the expiration timestamp. + ExpiresAt *string `json:"expiresAt,omitempty" tf:"expires_at,omitempty"` + + // option to create a Lockbox secret version from sensitive outputs + OutputToLockbox []OutputToLockboxInitParameters `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"` + + // An optional PGP key to encrypt the resulting secret key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername. + PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"` + + // The scope of the key. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // ID of the service account to create an API key for.
+ // +crossplane:generate:reference:type=ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +type ServiceAccountAPIKeyObservation struct { + + // Creation timestamp of the API key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encrypted secret key, base64 encoded. This is only populated when pgp_key is supplied. + EncryptedSecretKey *string `json:"encryptedSecretKey,omitempty" tf:"encrypted_secret_key,omitempty"` + + // The key will no longer be valid after the expiration timestamp. + ExpiresAt *string `json:"expiresAt,omitempty" tf:"expires_at,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The fingerprint of the PGP key used to encrypt the secret key. This is only populated when pgp_key is supplied. + KeyFingerprint *string `json:"keyFingerprint,omitempty" tf:"key_fingerprint,omitempty"` + + // option to create a Lockbox secret version from sensitive outputs + OutputToLockbox []OutputToLockboxObservation `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"` + + // ID of the Lockbox secret version that contains the value of secret_key. This is only populated when output_to_lockbox is supplied. This version will be destroyed when the IAM key is destroyed, or when output_to_lockbox is removed. + // version generated, that will contain the sensitive outputs + OutputToLockboxVersionID *string `json:"outputToLockboxVersionId,omitempty" tf:"output_to_lockbox_version_id,omitempty"` + + // An optional PGP key to encrypt the resulting secret key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername. + PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"` + + // The scope of the key. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // ID of the service account to create an API key for. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type ServiceAccountAPIKeyParameters struct { + + // The description of the key. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The key will no longer be valid after the expiration timestamp. + // +kubebuilder:validation:Optional + ExpiresAt *string `json:"expiresAt,omitempty" tf:"expires_at,omitempty"` + + // option to create a Lockbox secret version from sensitive outputs + // +kubebuilder:validation:Optional + OutputToLockbox []OutputToLockboxParameters `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"` + + // An optional PGP key to encrypt the resulting secret key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername. + // +kubebuilder:validation:Optional + PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"` + + // The scope of the key.
+ // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // ID of the service account to create an API key for. + // +crossplane:generate:reference:type=ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +// ServiceAccountAPIKeySpec defines the desired state of ServiceAccountAPIKey +type ServiceAccountAPIKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountAPIKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountAPIKeyInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountAPIKeyStatus defines the observed state of ServiceAccountAPIKey. +type ServiceAccountAPIKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountAPIKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccountAPIKey is the Schema for the ServiceAccountAPIKeys API. Allows management of a Yandex.Cloud IAM service account API key. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccountAPIKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServiceAccountAPIKeySpec `json:"spec"` + Status ServiceAccountAPIKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountAPIKeyList contains a list of ServiceAccountAPIKeys +type ServiceAccountAPIKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccountAPIKey `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccountAPIKey_Kind = "ServiceAccountAPIKey" + ServiceAccountAPIKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccountAPIKey_Kind}.String() + ServiceAccountAPIKey_KindAPIVersion = ServiceAccountAPIKey_Kind + "."
+ CRDGroupVersion.String() + ServiceAccountAPIKey_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccountAPIKey_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccountAPIKey{}, &ServiceAccountAPIKeyList{}) +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountiambinding_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccountiambinding_terraformed.go new file mode 100755 index 0000000..ad0705c --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceAccountIAMBinding +func (mg *ServiceAccountIAMBinding) GetTerraformResourceType() string { + return "yandex_iam_service_account_iam_binding" +} + +// GetConnectionDetailsMapping for this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceAccountIAMBinding +func (tr *ServiceAccountIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceAccountIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ServiceAccountIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceAccountIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAccountIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountiambinding_types.go b/apis/iam/v1alpha1/zz_serviceaccountiambinding_types.go new file mode 100755 index 0000000..dafaec4 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountiambinding_types.go @@ -0,0 +1,164 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServiceAccountIAMBindingInitParameters struct { + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. Only one yandex_iam_service_account_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The service account ID to apply a binding to. + // +crossplane:generate:reference:type=ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // References to ServiceAccount to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount to populate members. 
+ // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type ServiceAccountIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. Only one yandex_iam_service_account_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The service account ID to apply a binding to. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type ServiceAccountIAMBindingParameters struct { + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. Only one yandex_iam_service_account_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The service account ID to apply a binding to. + // +crossplane:generate:reference:type=ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // References to ServiceAccount to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// ServiceAccountIAMBindingSpec defines the desired state of ServiceAccountIAMBinding +type ServiceAccountIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountIAMBindingStatus defines the observed state of ServiceAccountIAMBinding. +type ServiceAccountIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccountIAMBinding is the Schema for the ServiceAccountIAMBindings API. Allows management of a single IAM binding for a Yandex IAM service account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccountIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ServiceAccountIAMBindingSpec `json:"spec"` + Status ServiceAccountIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountIAMBindingList contains a list of ServiceAccountIAMBindings +type ServiceAccountIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccountIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccountIAMBinding_Kind = "ServiceAccountIAMBinding" + ServiceAccountIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccountIAMBinding_Kind}.String() + ServiceAccountIAMBinding_KindAPIVersion = ServiceAccountIAMBinding_Kind + "." + CRDGroupVersion.String() + ServiceAccountIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccountIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccountIAMBinding{}, &ServiceAccountIAMBindingList{}) +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountiammember_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccountiammember_terraformed.go new file mode 100755 index 0000000..105f3a3 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountiammember_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
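Note the split the two kinds around this point encode: ServiceAccountIAMBinding above is authoritative for one role (its `members` set replaces whatever the role currently grants on the service account, hence "Only one ... per role"), while ServiceAccountIAMMember below manages a single member additively. A sketch of filling a binding's spec in Go; the role is a real Yandex role name, while the IDs and the `serviceAccount:` member value are invented examples of the provider's documented member format:

```go
package example

import (
	"fmt"

	iamv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
)

func strPtr(s string) *string { return &s }

func demoBinding() {
	b := &iamv1alpha1.ServiceAccountIAMBinding{}
	// Authoritative: this members list replaces the role's current grants.
	b.Spec.ForProvider.Role = strPtr("iam.serviceAccounts.user")
	b.Spec.ForProvider.ServiceAccountID = strPtr("ajeexample") // hypothetical SA ID
	b.Spec.ForProvider.Members = []*string{
		strPtr("serviceAccount:ajeconsumer"), // hypothetical member
	}
	fmt.Println(*b.Spec.ForProvider.Role)
}
```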
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceAccountIAMMember +func (mg *ServiceAccountIAMMember) GetTerraformResourceType() string { + return "yandex_iam_service_account_iam_member" +} + +// GetConnectionDetailsMapping for this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceAccountIAMMember +func (tr *ServiceAccountIAMMember) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceAccountIAMMember using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ServiceAccountIAMMember) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceAccountIAMMemberParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAccountIAMMember) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountiammember_types.go b/apis/iam/v1alpha1/zz_serviceaccountiammember_types.go new file mode 100755 index 0000000..0f9a6a1 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountiammember_types.go @@ -0,0 +1,161 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServiceAccountIAMMemberInitParameters struct { + + // Identity that will be granted the privilege in role. Entry can have one of the following values: + // +crossplane:generate:reference:type=ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // The role that should be applied. Only one yandex_iam_service_account_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The service account ID to apply a policy to. + // +crossplane:generate:reference:type=ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // Reference to a ServiceAccount to populate member. + // +kubebuilder:validation:Optional + ServiceAccountRef *v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate member. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type ServiceAccountIAMMemberObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identity that will be granted the privilege in role. Entry can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // The role that should be applied. Only one yandex_iam_service_account_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The service account ID to apply a policy to. 
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type ServiceAccountIAMMemberParameters struct { + + // Identity that will be granted the privilege in role. Entry can have one of the following values: + // +crossplane:generate:reference:type=ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +kubebuilder:validation:Optional + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // The role that should be applied. Only one yandex_iam_service_account_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The service account ID to apply a policy to. + // +crossplane:generate:reference:type=ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // Reference to a ServiceAccount to populate member. + // +kubebuilder:validation:Optional + ServiceAccountRef *v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate member. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// ServiceAccountIAMMemberSpec defines the desired state of ServiceAccountIAMMember +type ServiceAccountIAMMemberSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountIAMMemberParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountIAMMemberInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountIAMMemberStatus defines the observed state of ServiceAccountIAMMember. +type ServiceAccountIAMMemberStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountIAMMemberObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccountIAMMember is the Schema for the ServiceAccountIAMMembers API. 
Allows management of a single member for a single IAM binding for a Yandex IAM service account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccountIAMMember struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ServiceAccountIAMMemberSpec `json:"spec"` + Status ServiceAccountIAMMemberStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountIAMMemberList contains a list of ServiceAccountIAMMembers +type ServiceAccountIAMMemberList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccountIAMMember `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccountIAMMember_Kind = "ServiceAccountIAMMember" + ServiceAccountIAMMember_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccountIAMMember_Kind}.String() + ServiceAccountIAMMember_KindAPIVersion = ServiceAccountIAMMember_Kind + "." + CRDGroupVersion.String() + ServiceAccountIAMMember_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccountIAMMember_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccountIAMMember{}, &ServiceAccountIAMMemberList{}) +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountiampolicy_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccountiampolicy_terraformed.go new file mode 100755 index 0000000..c729a78 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountiampolicy_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
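The observation plumbing in the file that starts here mirrors what every kind above declares: `SetObservation` copies Terraform's snake_case attributes into the typed `status.atProvider` through the `tf:` struct tags, and `GetObservation` serializes them back. A round-trip sketch using the IAMMember kind just defined (the attribute values are invented):

```go
package main

import (
	"fmt"

	iamv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
)

func main() {
	m := &iamv1alpha1.ServiceAccountIAMMember{}
	// Terraform attribute names in ...
	obs := map[string]any{
		"id":                 "ajeexample-binding-id", // hypothetical
		"service_account_id": "ajeexample",            // hypothetical
	}
	if err := m.SetObservation(obs); err != nil {
		panic(err)
	}
	// ... typed status out.
	fmt.Println(*m.Status.AtProvider.ID)
	// And back: GetObservation re-serializes status.atProvider.
	round, _ := m.GetObservation()
	fmt.Println(round["service_account_id"]) // ajeexample
}
```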
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceAccountIAMPolicy +func (mg *ServiceAccountIAMPolicy) GetTerraformResourceType() string { + return "yandex_iam_service_account_iam_policy" +} + +// GetConnectionDetailsMapping for this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceAccountIAMPolicy +func (tr *ServiceAccountIAMPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceAccountIAMPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ServiceAccountIAMPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceAccountIAMPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAccountIAMPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountiampolicy_types.go b/apis/iam/v1alpha1/zz_serviceaccountiampolicy_types.go new file mode 100755 index 0000000..97ff18f --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountiampolicy_types.go @@ -0,0 +1,120 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServiceAccountIAMPolicyInitParameters struct { + + // The policy data generated by a yandex_iam_policy data source. + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + + // The service account ID to apply a policy to. + // +crossplane:generate:reference:type=ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +type ServiceAccountIAMPolicyObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The policy data generated by a yandex_iam_policy data source. + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + + // The service account ID to apply a policy to. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type ServiceAccountIAMPolicyParameters struct { + + // The policy data generated by a yandex_iam_policy data source. + // +kubebuilder:validation:Optional + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + + // The service account ID to apply a policy to. + // +crossplane:generate:reference:type=ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +// ServiceAccountIAMPolicySpec defines the desired state of ServiceAccountIAMPolicy +type ServiceAccountIAMPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountIAMPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountIAMPolicyInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountIAMPolicyStatus defines the observed state of ServiceAccountIAMPolicy. +type ServiceAccountIAMPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountIAMPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccountIAMPolicy is the Schema for the ServiceAccountIAMPolicys API. Allows management of the IAM policy for a Yandex IAM service account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccountIAMPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyData) || (has(self.initProvider) && has(self.initProvider.policyData))",message="spec.forProvider.policyData is a required parameter" + Spec ServiceAccountIAMPolicySpec `json:"spec"` + Status ServiceAccountIAMPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountIAMPolicyList contains a list of ServiceAccountIAMPolicys +type ServiceAccountIAMPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccountIAMPolicy `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccountIAMPolicy_Kind = "ServiceAccountIAMPolicy" + ServiceAccountIAMPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccountIAMPolicy_Kind}.String() + ServiceAccountIAMPolicy_KindAPIVersion = ServiceAccountIAMPolicy_Kind + "." + CRDGroupVersion.String() + ServiceAccountIAMPolicy_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccountIAMPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccountIAMPolicy{}, &ServiceAccountIAMPolicyList{}) +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountkey_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccountkey_terraformed.go new file mode 100755 index 0000000..2ec27da --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountkey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
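Every kind in this diff ships the same GetMergedParameters logic, so a short hand-written sketch (not generated code) may help: forProvider values win, and initProvider only fills in fields the user left unset. The map keys below are illustrative, not taken from the provider.

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// Stand-ins for spec.forProvider and spec.initProvider after the
	// JSON round-trip that GetParameters/GetInitParameters perform.
	params := map[string]any{"service_account_id": "from-forProvider"}
	initParams := map[string]any{"service_account_id": "from-initProvider", "policy_data": "{}"}

	// Same options as the generated code: WithSliceDeepCopy also turns
	// Overwrite on, so the extra option function switches it back off.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(params["service_account_id"]) // from-forProvider (kept)
	fmt.Println(params["policy_data"])        // {} (filled in from initProvider)
}

That side effect of WithSliceDeepCopy is exactly what the Note(lsviben) comment in the generated code is working around.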
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this ServiceAccountKey
+func (mg *ServiceAccountKey) GetTerraformResourceType() string {
+	return "yandex_iam_service_account_key"
+}
+
+// GetConnectionDetailsMapping for this ServiceAccountKey
+func (tr *ServiceAccountKey) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"private_key": "status.atProvider.privateKey"}
+}
+
+// GetObservation of this ServiceAccountKey
+func (tr *ServiceAccountKey) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this ServiceAccountKey
+func (tr *ServiceAccountKey) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this ServiceAccountKey
+func (tr *ServiceAccountKey) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this ServiceAccountKey
+func (tr *ServiceAccountKey) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ServiceAccountKey
+func (tr *ServiceAccountKey) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ServiceAccountKey
+func (tr *ServiceAccountKey) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ServiceAccountKey
+func (tr *ServiceAccountKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ServiceAccountKey using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ServiceAccountKey) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceAccountKeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAccountKey) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountkey_types.go b/apis/iam/v1alpha1/zz_serviceaccountkey_types.go new file mode 100755 index 0000000..d104cc2 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountkey_types.go @@ -0,0 +1,211 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServiceAccountKeyInitParameters struct { + + // The description of the key pair. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The output format of the keys. PEM_FILE is the default format. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The algorithm used to generate the key. RSA_2048 is the default algorithm. Valid values are listed in the API reference. + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // option to create a Lockbox secret version from sensitive outputs + OutputToLockbox []ServiceAccountKeyOutputToLockboxInitParameters `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"` + + // An optional PGP key to encrypt the resulting private key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername. + PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"` + + // ID of the service account to create a pair for. + // +crossplane:generate:reference:type=ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +type ServiceAccountKeyObservation struct { + + // Creation timestamp of the static access key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The description of the key pair. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encrypted private key, base64 encoded. This is only populated when pgp_key is supplied. + EncryptedPrivateKey *string `json:"encryptedPrivateKey,omitempty" tf:"encrypted_private_key,omitempty"` + + // The output format of the keys. PEM_FILE is the default format. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The algorithm used to generate the key. RSA_2048 is the default algorithm. Valid values are listed in the API reference. 
+	KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"`
+
+	// The fingerprint of the PGP key used to encrypt the private key. This is only populated when pgp_key is supplied.
+	KeyFingerprint *string `json:"keyFingerprint,omitempty" tf:"key_fingerprint,omitempty"`
+
+	// option to create a Lockbox secret version from sensitive outputs
+	OutputToLockbox []ServiceAccountKeyOutputToLockboxObservation `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"`
+
+	// ID of the Lockbox secret version that contains the value of private_key. This is only populated when output_to_lockbox is supplied. This version will be destroyed when the IAM key is destroyed, or when output_to_lockbox is removed.
+	// version generated, that will contain the sensitive outputs
+	OutputToLockboxVersionID *string `json:"outputToLockboxVersionId,omitempty" tf:"output_to_lockbox_version_id,omitempty"`
+
+	// An optional PGP key to encrypt the resulting private key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername.
+	PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"`
+
+	// The public key.
+	PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"`
+
+	// ID of the service account to create a pair for.
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+}
+
+type ServiceAccountKeyOutputToLockboxInitParameters struct {
+
+	// Entry where to store the value of private_key.
+	// entry that will store the value of private_key
+	EntryForPrivateKey *string `json:"entryForPrivateKey,omitempty" tf:"entry_for_private_key,omitempty"`
+
+	// ID of the Lockbox secret where to store the sensitive values.
+	// secret where to add the version with the sensitive values
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+}
+
+type ServiceAccountKeyOutputToLockboxObservation struct {
+
+	// Entry where to store the value of private_key.
+	// entry that will store the value of private_key
+	EntryForPrivateKey *string `json:"entryForPrivateKey,omitempty" tf:"entry_for_private_key,omitempty"`
+
+	// ID of the Lockbox secret where to store the sensitive values.
+	// secret where to add the version with the sensitive values
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+}
+
+type ServiceAccountKeyOutputToLockboxParameters struct {
+
+	// Entry where to store the value of private_key.
+	// entry that will store the value of private_key
+	// +kubebuilder:validation:Optional
+	EntryForPrivateKey *string `json:"entryForPrivateKey" tf:"entry_for_private_key,omitempty"`
+
+	// ID of the Lockbox secret where to store the sensitive values.
+	// secret where to add the version with the sensitive values
+	// +kubebuilder:validation:Optional
+	SecretID *string `json:"secretId" tf:"secret_id,omitempty"`
+}
+
+type ServiceAccountKeyParameters struct {
+
+	// The description of the key pair.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The output format of the keys. PEM_FILE is the default format.
+	// +kubebuilder:validation:Optional
+	Format *string `json:"format,omitempty" tf:"format,omitempty"`
+
+	// The algorithm used to generate the key. RSA_2048 is the default algorithm. Valid values are listed in the API reference.
+ // +kubebuilder:validation:Optional + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // option to create a Lockbox secret version from sensitive outputs + // +kubebuilder:validation:Optional + OutputToLockbox []ServiceAccountKeyOutputToLockboxParameters `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"` + + // An optional PGP key to encrypt the resulting private key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername. + // +kubebuilder:validation:Optional + PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"` + + // ID of the service account to create a pair for. + // +crossplane:generate:reference:type=ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +// ServiceAccountKeySpec defines the desired state of ServiceAccountKey +type ServiceAccountKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountKeyInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountKeyStatus defines the observed state of ServiceAccountKey. +type ServiceAccountKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccountKey is the Schema for the ServiceAccountKeys API. Allows management of a Yandex.Cloud IAM service account key. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccountKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServiceAccountKeySpec `json:"spec"` + Status ServiceAccountKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountKeyList contains a list of ServiceAccountKeys +type ServiceAccountKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccountKey `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccountKey_Kind = "ServiceAccountKey" + ServiceAccountKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccountKey_Kind}.String() + ServiceAccountKey_KindAPIVersion = ServiceAccountKey_Kind + "." + CRDGroupVersion.String() + ServiceAccountKey_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccountKey_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccountKey{}, &ServiceAccountKeyList{}) +} diff --git a/apis/iam/v1alpha1/zz_serviceaccountstaticaccesskey_terraformed.go b/apis/iam/v1alpha1/zz_serviceaccountstaticaccesskey_terraformed.go new file mode 100755 index 0000000..cae9192 --- /dev/null +++ b/apis/iam/v1alpha1/zz_serviceaccountstaticaccesskey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
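As a usage sketch for the ServiceAccountKey kind defined above (hand-written, not part of the generated diff): the object names are invented, and the apis import path is inferred from the +crossplane:generate:reference markers elsewhere in this diff.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	iam "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	key := &iam.ServiceAccountKey{
		ObjectMeta: metav1.ObjectMeta{Name: "example-key"},
		Spec: iam.ServiceAccountKeySpec{
			ForProvider: iam.ServiceAccountKeyParameters{
				Description: ptr("key pair for example-sa"),
				// serviceAccountId can be resolved from another managed
				// resource via a reference instead of being hard-coded.
				ServiceAccountIDRef: &xpv1.Reference{Name: "example-sa"},
			},
		},
	}
	// The generated mapping routes the sensitive private_key attribute
	// into the connection secret written here, rather than into status.
	key.Spec.WriteConnectionSecretToReference = &xpv1.SecretReference{
		Name:      "example-key-conn",
		Namespace: "crossplane-system",
	}

	fmt.Println(key.GetTerraformResourceType(), key.GetConnectionDetailsMapping())
}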
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this ServiceAccountStaticAccessKey
+func (mg *ServiceAccountStaticAccessKey) GetTerraformResourceType() string {
+	return "yandex_iam_service_account_static_access_key"
+}
+
+// GetConnectionDetailsMapping for this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"secret_key": "status.atProvider.secretKey"}
+}
+
+// GetObservation of this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ServiceAccountStaticAccessKey
+func (tr *ServiceAccountStaticAccessKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ServiceAccountStaticAccessKey using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ServiceAccountStaticAccessKey) LateInitialize(attrs []byte) (bool, error) {
+	params := &ServiceAccountStaticAccessKeyParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *ServiceAccountStaticAccessKey) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/iam/v1alpha1/zz_serviceaccountstaticaccesskey_types.go b/apis/iam/v1alpha1/zz_serviceaccountstaticaccesskey_types.go
new file mode 100755
index 0000000..1cb8ac2
--- /dev/null
+++ b/apis/iam/v1alpha1/zz_serviceaccountstaticaccesskey_types.go
@@ -0,0 +1,204 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type ServiceAccountStaticAccessKeyInitParameters struct {
+
+	// The description of the service account static key.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// option to create a Lockbox secret version from sensitive outputs
+	OutputToLockbox []ServiceAccountStaticAccessKeyOutputToLockboxInitParameters `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"`
+
+	// An optional PGP key to encrypt the resulting secret key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername.
+	PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"`
+
+	// ID of the service account which is used to get a static key.
+	// +crossplane:generate:reference:type=ServiceAccount
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+	// Reference to a ServiceAccount to populate serviceAccountId.
+	// +kubebuilder:validation:Optional
+	ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccount to populate serviceAccountId.
+	// +kubebuilder:validation:Optional
+	ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+}
+
+type ServiceAccountStaticAccessKeyObservation struct {
+
+	// ID of the static access key. This is only populated when output_to_lockbox is not provided.
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Creation timestamp of the static access key.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// The description of the service account static key.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The encrypted secret, base64 encoded. This is only populated when pgp_key is supplied.
+	EncryptedSecretKey *string `json:"encryptedSecretKey,omitempty" tf:"encrypted_secret_key,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// The fingerprint of the PGP key used to encrypt the secret key. This is only populated when pgp_key is supplied.
+	KeyFingerprint *string `json:"keyFingerprint,omitempty" tf:"key_fingerprint,omitempty"`
+
+	// option to create a Lockbox secret version from sensitive outputs
+	OutputToLockbox []ServiceAccountStaticAccessKeyOutputToLockboxObservation `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"`
+
+	// ID of the Lockbox secret version that contains the values of access_key and secret_key. This is only populated when output_to_lockbox is supplied. This version will be destroyed when the IAM key is destroyed, or when output_to_lockbox is removed.
+	// version generated, that will contain the sensitive outputs
+	OutputToLockboxVersionID *string `json:"outputToLockboxVersionId,omitempty" tf:"output_to_lockbox_version_id,omitempty"`
+
+	// An optional PGP key to encrypt the resulting secret key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername.
+	PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"`
+
+	// ID of the service account which is used to get a static key.
+	ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+}
+
+type ServiceAccountStaticAccessKeyOutputToLockboxInitParameters struct {
+
+	// Entry where to store the value of access_key.
+	// entry that will store the value of access_key
+	EntryForAccessKey *string `json:"entryForAccessKey,omitempty" tf:"entry_for_access_key,omitempty"`
+
+	// Entry where to store the value of secret_key.
+	// entry that will store the value of secret_key
+	EntryForSecretKey *string `json:"entryForSecretKey,omitempty" tf:"entry_for_secret_key,omitempty"`
+
+	// ID of the Lockbox secret where to store the sensitive values.
+	// secret where to add the version with the sensitive values
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+}
+
+type ServiceAccountStaticAccessKeyOutputToLockboxObservation struct {
+
+	// Entry where to store the value of access_key.
+	// entry that will store the value of access_key
+	EntryForAccessKey *string `json:"entryForAccessKey,omitempty" tf:"entry_for_access_key,omitempty"`
+
+	// Entry where to store the value of secret_key.
+	// entry that will store the value of secret_key
+	EntryForSecretKey *string `json:"entryForSecretKey,omitempty" tf:"entry_for_secret_key,omitempty"`
+
+	// ID of the Lockbox secret where to store the sensitive values.
+	// secret where to add the version with the sensitive values
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+}
+
+type ServiceAccountStaticAccessKeyOutputToLockboxParameters struct {
+
+	// Entry where to store the value of access_key.
+	// entry that will store the value of access_key
+	// +kubebuilder:validation:Optional
+	EntryForAccessKey *string `json:"entryForAccessKey" tf:"entry_for_access_key,omitempty"`
+
+	// Entry where to store the value of secret_key.
+	// entry that will store the value of secret_key
+	// +kubebuilder:validation:Optional
+	EntryForSecretKey *string `json:"entryForSecretKey" tf:"entry_for_secret_key,omitempty"`
+
+	// ID of the Lockbox secret where to store the sensitive values.
+ // secret where to add the version with the sensitive values + // +kubebuilder:validation:Optional + SecretID *string `json:"secretId" tf:"secret_id,omitempty"` +} + +type ServiceAccountStaticAccessKeyParameters struct { + + // The description of the service account static key. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // option to create a Lockbox secret version from sensitive outputs + // +kubebuilder:validation:Optional + OutputToLockbox []ServiceAccountStaticAccessKeyOutputToLockboxParameters `json:"outputToLockbox,omitempty" tf:"output_to_lockbox,omitempty"` + + // An optional PGP key to encrypt the resulting secret key material. May either be a base64-encoded public key or a keybase username in the form keybase:keybaseusername. + // +kubebuilder:validation:Optional + PgpKey *string `json:"pgpKey,omitempty" tf:"pgp_key,omitempty"` + + // ID of the service account which is used to get a static key. + // +crossplane:generate:reference:type=ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` +} + +// ServiceAccountStaticAccessKeySpec defines the desired state of ServiceAccountStaticAccessKey +type ServiceAccountStaticAccessKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceAccountStaticAccessKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceAccountStaticAccessKeyInitParameters `json:"initProvider,omitempty"` +} + +// ServiceAccountStaticAccessKeyStatus defines the observed state of ServiceAccountStaticAccessKey. +type ServiceAccountStaticAccessKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceAccountStaticAccessKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ServiceAccountStaticAccessKey is the Schema for the ServiceAccountStaticAccessKeys API. Allows management of a Yandex.Cloud IAM service account static access key. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ServiceAccountStaticAccessKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServiceAccountStaticAccessKeySpec `json:"spec"` + Status ServiceAccountStaticAccessKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceAccountStaticAccessKeyList contains a list of ServiceAccountStaticAccessKeys +type ServiceAccountStaticAccessKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAccountStaticAccessKey `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAccountStaticAccessKey_Kind = "ServiceAccountStaticAccessKey" + ServiceAccountStaticAccessKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAccountStaticAccessKey_Kind}.String() + ServiceAccountStaticAccessKey_KindAPIVersion = ServiceAccountStaticAccessKey_Kind + "." + CRDGroupVersion.String() + ServiceAccountStaticAccessKey_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAccountStaticAccessKey_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAccountStaticAccessKey{}, &ServiceAccountStaticAccessKeyList{}) +} diff --git a/apis/iot/v1alpha1/zz_corebroker_terraformed.go b/apis/iot/v1alpha1/zz_corebroker_terraformed.go index 873cc66..599a4f9 100755 --- a/apis/iot/v1alpha1/zz_corebroker_terraformed.go +++ b/apis/iot/v1alpha1/zz_corebroker_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
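A similar hand-written sketch (not generated code) for ServiceAccountStaticAccessKey, this time routing the generated credentials into a Lockbox secret via output_to_lockbox; the Lockbox secret ID and entry names below are placeholders.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	iam "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	key := &iam.ServiceAccountStaticAccessKey{
		ObjectMeta: metav1.ObjectMeta{Name: "example-static-key"},
		Spec: iam.ServiceAccountStaticAccessKeySpec{
			ForProvider: iam.ServiceAccountStaticAccessKeyParameters{
				ServiceAccountIDRef: &xpv1.Reference{Name: "example-sa"},
				// With output_to_lockbox set, access_key/secret_key land in the
				// referenced Lockbox secret; note that entryFor* and secretId
				// have no omitempty, so they are required inside the block.
				OutputToLockbox: []iam.ServiceAccountStaticAccessKeyOutputToLockboxParameters{{
					SecretID:          ptr("<lockbox-secret-id>"), // placeholder
					EntryForAccessKey: ptr("access-key"),
					EntryForSecretKey: ptr("secret-key"),
				}},
			},
		},
	}

	fmt.Println(key.GetConnectionDetailsMapping()) // map[secret_key:status.atProvider.secretKey]
}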
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this CoreBroker func (mg *CoreBroker) GetTerraformResourceType() string { - return "yandex_iot_core_broker" + return "yandex_iot_core_broker" } // GetConnectionDetailsMapping for this CoreBroker func (tr *CoreBroker) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this CoreBroker func (tr *CoreBroker) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this CoreBroker func (tr *CoreBroker) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this CoreBroker func (tr *CoreBroker) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this CoreBroker func (tr *CoreBroker) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this CoreBroker func (tr *CoreBroker) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this CoreBroker func (tr *CoreBroker) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this CoreBroker func (tr *CoreBroker) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets
-	// overwrite to true, we need to set it back to false, we don't
-	// want to overwrite the forProvider fields with the initProvider
-	// fields.
-	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-		c.Overwrite = false
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-	}
-
-	return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this CoreBroker using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *CoreBroker) LateInitialize(attrs []byte) (bool, error) {
-	params := &CoreBrokerParameters{}
-	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-	}
-	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-	li := resource.NewGenericLateInitializer(opts...)
-	return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &CoreBrokerParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *CoreBroker) GetTerraformSchemaVersion() int {
-	return 0
+	return 0
 }
diff --git a/apis/iot/v1alpha1/zz_corebroker_types.go b/apis/iot/v1alpha1/zz_corebroker_types.go
index 1f94d26..62ae21b 100755
--- a/apis/iot/v1alpha1/zz_corebroker_types.go
+++ b/apis/iot/v1alpha1/zz_corebroker_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
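LateInitialize above delegates to upjet's generic late-initializer, which copies observed values into spec fields the user left empty. A toy sketch of that behavior, using a throwaway struct rather than the generated CoreBrokerParameters:

package main

import (
	"fmt"

	"github.com/crossplane/upjet/pkg/resource"
)

// Toy stand-in for a generated Parameters struct; not part of the provider.
type params struct {
	Name   *string            `json:"name,omitempty"`
	Labels map[string]*string `json:"labels,omitempty"`
}

func ptr[T any](v T) *T { return &v }

func main() {
	desired := &params{Name: ptr("my-broker")} // user set only name
	observed := &params{
		Name:   ptr("my-broker"),
		Labels: map[string]*string{"env": ptr("dev")}, // filled in server-side
	}

	// Same option as the generated code: skip zero values of
	// omitempty-tagged fields when late-initializing.
	li := resource.NewGenericLateInitializer(
		resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard),
	)
	changed, err := li.LateInitialize(desired, observed)
	if err != nil {
		panic(err)
	}

	fmt.Println(changed, *desired.Labels["env"]) // true dev
}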
package v1alpha1 @@ -9,174 +7,159 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type CoreBrokerInitParameters struct { + // A set of certificate's fingerprints for the IoT Core Broker + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` -// A set of certificate's fingerprints for the IoT Core Broker -// +listType=set -Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` - -// Description of the IoT Core Broker -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the IoT Core Broker + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Folder ID for the IoT Core Broker -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the IoT Core Broker + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the IoT Core Broker. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the IoT Core Broker. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging for IoT Core Broker -LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging for IoT Core Broker + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// IoT Core Broker name used to define broker -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // IoT Core Broker name used to define broker + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type CoreBrokerObservation struct { + // A set of certificate's fingerprints for the IoT Core Broker + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` -// A set of certificate's fingerprints for the IoT Core Broker -// +listType=set -Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` - -// Creation timestamp of the IoT Core Broker -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // Creation timestamp of the IoT Core Broker + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Description of the IoT Core Broker -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the IoT Core Broker + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Folder ID for the IoT Core Broker -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the IoT Core Broker + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the IoT Core Broker. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the IoT Core Broker. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging for IoT Core Broker -LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging for IoT Core Broker + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// IoT Core Broker name used to define broker -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // IoT Core Broker name used to define broker + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type CoreBrokerParameters struct { + // A set of certificate's fingerprints for the IoT Core Broker + // +kubebuilder:validation:Optional + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` -// A set of certificate's fingerprints for the IoT Core Broker -// +kubebuilder:validation:Optional -// +listType=set -Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` - -// Description of the IoT Core Broker -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the IoT Core Broker + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Folder ID for the IoT Core Broker -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the IoT Core Broker + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the IoT Core Broker. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the IoT Core Broker. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging for IoT Core Broker -// +kubebuilder:validation:Optional -LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging for IoT Core Broker + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// IoT Core Broker name used to define broker -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // IoT Core Broker name used to define broker + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type LogOptionsInitParameters struct { + // Is logging for broker disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging for broker disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to default log group for specified folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsObservation struct { + // Is logging for broker disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging for broker disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to default log group for specified folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsParameters struct { + // Is logging for broker disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging for broker disabled -// +kubebuilder:validation:Optional -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Log entries are written to default log group for specified folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to default log group for specified folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID 
*string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -// +kubebuilder:validation:Optional -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -// +kubebuilder:validation:Optional -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } // CoreBrokerSpec defines the desired state of CoreBroker type CoreBrokerSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider CoreBrokerParameters `json:"forProvider"` + ForProvider CoreBrokerParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -187,20 +170,19 @@ type CoreBrokerSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider CoreBrokerInitParameters `json:"initProvider,omitempty"` + InitProvider CoreBrokerInitParameters `json:"initProvider,omitempty"` } // CoreBrokerStatus defines the observed state of CoreBroker. type CoreBrokerStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider CoreBrokerObservation `json:"atProvider,omitempty"` + AtProvider CoreBrokerObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // CoreBroker is the Schema for the CoreBrokers API. Allows management of a Yandex.Cloud IoT Core Broker. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -210,9 +192,9 @@ type CoreBrokerStatus struct { type CoreBroker struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec CoreBrokerSpec `json:"spec"` - Status CoreBrokerStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec CoreBrokerSpec `json:"spec"` + Status CoreBrokerStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/iot/v1alpha1/zz_coredevice_terraformed.go b/apis/iot/v1alpha1/zz_coredevice_terraformed.go index feb3858..dddc4b5 100755 --- a/apis/iot/v1alpha1/zz_coredevice_terraformed.go +++ b/apis/iot/v1alpha1/zz_coredevice_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
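To tie the markers above together, a hand-written sketch (not generated code) of a CoreBroker object: the folder reference name is invented, the min_level value is assumed to be valid, and the XValidation rule shown above is what makes name effectively required when Create/Update management policies are active.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	iot "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	broker := &iot.CoreBroker{
		ObjectMeta: metav1.ObjectMeta{Name: "example-broker"},
		Spec: iot.CoreBrokerSpec{
			ForProvider: iot.CoreBrokerParameters{
				// Rejected at admission time by the CEL rule if omitted.
				Name: ptr("example-broker"),
				// folderId resolved from a Folder managed resource.
				FolderIDRef: &xpv1.Reference{Name: "example-folder"},
				LogOptions: []iot.LogOptionsParameters{{
					MinLevel: ptr("WARN"), // assumed min_level value
				}},
			},
		},
	}

	fmt.Println(broker.GetTerraformResourceType()) // yandex_iot_core_broker
}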
diff --git a/apis/iot/v1alpha1/zz_coredevice_terraformed.go b/apis/iot/v1alpha1/zz_coredevice_terraformed.go
index feb3858..dddc4b5 100755
--- a/apis/iot/v1alpha1/zz_coredevice_terraformed.go
+++ b/apis/iot/v1alpha1/zz_coredevice_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this CoreDevice
 func (mg *CoreDevice) GetTerraformResourceType() string {
-	return "yandex_iot_core_device"
+	return "yandex_iot_core_device"
 }
 
 // GetConnectionDetailsMapping for this CoreDevice
 func (tr *CoreDevice) GetConnectionDetailsMapping() map[string]string {
-	return map[string]string{ "passwords[*]": "passwordsSecretRef[*]", }
+	return map[string]string{"passwords[*]": "passwordsSecretRef[*]"}
 }
 
 // GetObservation of this CoreDevice
 func (tr *CoreDevice) GetObservation() (map[string]any, error) {
-	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this CoreDevice
 func (tr *CoreDevice) SetObservation(obs map[string]any) error {
-	p, err := json.TFParser.Marshal(obs)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this CoreDevice
 func (tr *CoreDevice) GetID() string {
-	if tr.Status.AtProvider.ID == nil {
-		return ""
-	}
-	return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this CoreDevice
 func (tr *CoreDevice) GetParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this CoreDevice
 func (tr *CoreDevice) SetParameters(params map[string]any) error {
-	p, err := json.TFParser.Marshal(params)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this CoreDevice
 func (tr *CoreDevice) GetInitParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetMergedParameters of this CoreDevice
 func (tr *CoreDevice) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from the initProvider to forProvider. As it also sets
-	// overwrite to true, we need to set it back to false, we don't
-	// want to overwrite the forProvider fields with the initProvider
-	// fields.
-	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-		c.Overwrite = false
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-	}
-
-	return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this CoreDevice using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *CoreDevice) LateInitialize(attrs []byte) (bool, error) {
-	params := &CoreDeviceParameters{}
-	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-	}
-	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-	li := resource.NewGenericLateInitializer(opts...)
-	return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &CoreDeviceParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *CoreDevice) GetTerraformSchemaVersion() int {
-	return 0
+	return 0
 }
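The Note(lsviben) comment in GetMergedParameters above captures the merge semantics: initProvider values only fill holes in forProvider, and WithSliceDeepCopy re-enables Overwrite as a side effect, so the callback turns it back off. A minimal sketch of that behavior with plain maps (field values invented; newer mergo releases live at dario.cat/mergo):

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// Stand-ins for GetParameters() and GetInitParameters() output.
	params := map[string]any{
		"name":         "device-a",
		"certificates": []any{"fp-1"},
	}
	initParams := map[string]any{
		"name":        "ignored, forProvider wins",
		"description": "filled in from initProvider",
	}
	// Same call shape as the generated GetMergedParameters.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false // WithSliceDeepCopy flips Overwrite on; turn it back off
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["name"])        // device-a
	fmt.Println(params["description"]) // filled in from initProvider
}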
diff --git a/apis/iot/v1alpha1/zz_coredevice_types.go b/apis/iot/v1alpha1/zz_coredevice_types.go
index 0b0044c..78be7c2 100755
--- a/apis/iot/v1alpha1/zz_coredevice_types.go
+++ b/apis/iot/v1alpha1/zz_coredevice_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
@@ -9,97 +7,88 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
-
 type CoreDeviceInitParameters struct {
+	// A set of key/value aliases pairs to assign to the IoT Core Device
+	// +mapType=granular
+	Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
 
-// A set of key/value aliases pairs to assign to the IoT Core Device
-// +mapType=granular
-Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
+	// A set of certificate's fingerprints for the IoT Core Device
+	// +listType=set
+	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
 
-// A set of certificate's fingerprints for the IoT Core Device
-// +listType=set
-Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
+	// Description of the IoT Core Device
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Description of the IoT Core Device
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// IoT Core Device name used to define device
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// IoT Core Device name used to define device
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"`
 
-Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"`
-
-// IoT Core Registry ID for the IoT Core Device
-RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
+	// IoT Core Registry ID for the IoT Core Device
+	RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
 }
 
-
 type CoreDeviceObservation struct {
+	// A set of key/value aliases pairs to assign to the IoT Core Device
+	// +mapType=granular
+	Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
 
-// A set of key/value aliases pairs to assign to the IoT Core Device
-// +mapType=granular
-Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
-
-// A set of certificate's fingerprints for the IoT Core Device
-// +listType=set
-Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
+	// A set of certificate's fingerprints for the IoT Core Device
+	// +listType=set
+	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
 
-// Creation timestamp of the IoT Core Device
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+	// Creation timestamp of the IoT Core Device
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
 
-// Description of the IoT Core Device
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Description of the IoT Core Device
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-// IoT Core Device name used to define device
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// IoT Core Device name used to define device
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// IoT Core Registry ID for the IoT Core Device
-RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
+	// IoT Core Registry ID for the IoT Core Device
+	RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
 }
 
-
 type CoreDeviceParameters struct {
+	// A set of key/value aliases pairs to assign to the IoT Core Device
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
 
-// A set of key/value aliases pairs to assign to the IoT Core Device
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
-
-// A set of certificate's fingerprints for the IoT Core Device
-// +kubebuilder:validation:Optional
-// +listType=set
-Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
+	// A set of certificate's fingerprints for the IoT Core Device
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
 
-// Description of the IoT Core Device
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Description of the IoT Core Device
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// IoT Core Device name used to define device
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// IoT Core Device name used to define device
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// A set of passwords's id for the IoT Core Device
-// +kubebuilder:validation:Optional
-PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"`
+	// A set of passwords's id for the IoT Core Device
+	// +kubebuilder:validation:Optional
+	PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"`
 
-// IoT Core Registry ID for the IoT Core Device
-// +kubebuilder:validation:Optional
-RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
+	// IoT Core Registry ID for the IoT Core Device
+	// +kubebuilder:validation:Optional
+	RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
 }
 
 // CoreDeviceSpec defines the desired state of CoreDevice
 type CoreDeviceSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider CoreDeviceParameters `json:"forProvider"`
+	ForProvider CoreDeviceParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -110,20 +99,19 @@ type CoreDeviceSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider CoreDeviceInitParameters `json:"initProvider,omitempty"`
+	InitProvider CoreDeviceInitParameters `json:"initProvider,omitempty"`
 }
 
 // CoreDeviceStatus defines the observed state of CoreDevice.
 type CoreDeviceStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider CoreDeviceObservation `json:"atProvider,omitempty"`
+	AtProvider CoreDeviceObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // CoreDevice is the Schema for the CoreDevices API. Allows management of a Yandex.Cloud IoT Core Device.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
@@ -133,10 +121,10 @@ type CoreDeviceStatus struct {
 type CoreDevice struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.registryId) || (has(self.initProvider) && has(self.initProvider.registryId))",message="spec.forProvider.registryId is a required parameter"
-	Spec CoreDeviceSpec `json:"spec"`
-	Status CoreDeviceStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.registryId) || (has(self.initProvider) && has(self.initProvider.registryId))",message="spec.forProvider.registryId is a required parameter"
+	Spec   CoreDeviceSpec   `json:"spec"`
+	Status CoreDeviceStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
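CoreDevice passwords never reach Terraform state or the spec directly: PasswordsSecretRef above carries tf:"-" and holds v1.SecretKeySelector entries, which GetConnectionDetailsMapping in the terraformed file wires to the Terraform "passwords[*]" attribute. A small sketch of building such a reference in Go; the secret name, namespace, and key are invented for the example:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// Hypothetical: a secret "iot-device-passwords" with key "password"
	// is assumed to exist in the crossplane-system namespace.
	refs := []v1.SecretKeySelector{{
		SecretReference: v1.SecretReference{
			Name:      "iot-device-passwords",
			Namespace: "crossplane-system",
		},
		Key: "password",
	}}
	// CoreDeviceParameters.PasswordsSecretRef is *[]v1.SecretKeySelector,
	// so a manifest would carry the serialized form of &refs.
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(&refs)
	fmt.Println("entries:", len(refs))
}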
diff --git a/apis/iot/v1alpha1/zz_coreregistry_terraformed.go b/apis/iot/v1alpha1/zz_coreregistry_terraformed.go
index 109e849..e1be877 100755
--- a/apis/iot/v1alpha1/zz_coreregistry_terraformed.go
+++ b/apis/iot/v1alpha1/zz_coreregistry_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this CoreRegistry
 func (mg *CoreRegistry) GetTerraformResourceType() string {
-	return "yandex_iot_core_registry"
+	return "yandex_iot_core_registry"
 }
 
 // GetConnectionDetailsMapping for this CoreRegistry
 func (tr *CoreRegistry) GetConnectionDetailsMapping() map[string]string {
-	return map[string]string{ "passwords[*]": "passwordsSecretRef[*]", }
+	return map[string]string{"passwords[*]": "passwordsSecretRef[*]"}
 }
 
 // GetObservation of this CoreRegistry
 func (tr *CoreRegistry) GetObservation() (map[string]any, error) {
-	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this CoreRegistry
 func (tr *CoreRegistry) SetObservation(obs map[string]any) error {
-	p, err := json.TFParser.Marshal(obs)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this CoreRegistry
 func (tr *CoreRegistry) GetID() string {
-	if tr.Status.AtProvider.ID == nil {
-		return ""
-	}
-	return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this CoreRegistry
 func (tr *CoreRegistry) GetParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this CoreRegistry
 func (tr *CoreRegistry) SetParameters(params map[string]any) error {
-	p, err := json.TFParser.Marshal(params)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this CoreRegistry
 func (tr *CoreRegistry) GetInitParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetMergedParameters of this CoreRegistry
 func (tr *CoreRegistry) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from the initProvider to forProvider. As it also sets
-	// overwrite to true, we need to set it back to false, we don't
-	// want to overwrite the forProvider fields with the initProvider
-	// fields.
-	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-		c.Overwrite = false
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-	}
-
-	return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this CoreRegistry using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *CoreRegistry) LateInitialize(attrs []byte) (bool, error) {
-	params := &CoreRegistryParameters{}
-	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-	}
-	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-	li := resource.NewGenericLateInitializer(opts...)
-	return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &CoreRegistryParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *CoreRegistry) GetTerraformSchemaVersion() int {
-	return 0
+	return 0
 }
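LateInitialize above delegates to upjet's GenericLateInitializer, which fills spec.forProvider fields that are still nil from the observed Terraform state and reports whether anything changed. A simplified single-field model of that contract, not the real reflection-based implementation:

package main

import "fmt"

// lateInitString mirrors the late-init rule for one pointer field: a value
// the user already set is never overwritten, and a copy is taken so the
// spec does not alias the observation.
func lateInitString(spec **string, observed *string) bool {
	if *spec != nil || observed == nil {
		return false
	}
	v := *observed
	*spec = &v
	return true
}

func main() {
	var specFolderID *string         // user left folder_id unset
	observed := "b1g-example-folder" // value reported in tfstate (made up)
	changed := lateInitString(&specFolderID, &observed)
	fmt.Println(changed, *specFolderID) // true b1g-example-folder
}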
diff --git a/apis/iot/v1alpha1/zz_coreregistry_types.go b/apis/iot/v1alpha1/zz_coreregistry_types.go
index b6dec51..703d04c 100755
--- a/apis/iot/v1alpha1/zz_coreregistry_types.go
+++ b/apis/iot/v1alpha1/zz_coreregistry_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
@@ -9,180 +7,165 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
-
 type CoreRegistryInitParameters struct {
+	// A set of certificate's fingerprints for the IoT Core Registry
+	// +listType=set
+	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
 
-// A set of certificate's fingerprints for the IoT Core Registry
-// +listType=set
-Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
+	// Description of the IoT Core Registry
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Description of the IoT Core Registry
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Folder ID for the IoT Core Registry
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Folder ID for the IoT Core Registry
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
 
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
 
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// A set of key/value label pairs to assign to the IoT Core Registry.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the IoT Core Registry.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Options for logging for IoT Core Registry
+	LogOptions []CoreRegistryLogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"`
 
-// Options for logging for IoT Core Registry
-LogOptions []CoreRegistryLogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"`
+	// IoT Core Device name used to define registry
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// IoT Core Device name used to define registry
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"`
 
-Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"`
 }
 
-
 type CoreRegistryLogOptionsInitParameters struct {
+	// Is logging for registry disabled
+	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
 
-// Is logging for registry disabled
-Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
+	// Log entries are written to default log group for specified folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Log entries are written to default log group for specified folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Log entries are written to specified log group
+	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
 
-// Log entries are written to specified log group
-LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+	// Minimum log entry level
+	MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
 
-// Minimum log entry level
-MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
 }
 
-
 type CoreRegistryLogOptionsObservation struct {
+	// Is logging for registry disabled
+	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
 
-// Is logging for registry disabled
-Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
+	// Log entries are written to default log group for specified folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Log entries are written to default log group for specified folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Log entries are written to specified log group
+	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
 
-// Log entries are written to specified log group
-LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+	// Minimum log entry level
+	MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
 
-// Minimum log entry level
-MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
 }
 
-
 type CoreRegistryLogOptionsParameters struct {
+	// Is logging for registry disabled
+	// +kubebuilder:validation:Optional
+	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
 
-// Is logging for registry disabled
-// +kubebuilder:validation:Optional
-Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
+	// Log entries are written to default log group for specified folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Log entries are written to default log group for specified folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Log entries are written to specified log group
+	// +kubebuilder:validation:Optional
+	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
 
-// Log entries are written to specified log group
-// +kubebuilder:validation:Optional
-LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+	// Minimum log entry level
+	// +kubebuilder:validation:Optional
+	MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
 
-// Minimum log entry level
-// +kubebuilder:validation:Optional
-MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
 }
 
-
 type CoreRegistryObservation struct {
+	// A set of certificate's fingerprints for the IoT Core Registry
+	// +listType=set
+	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
 
-// A set of certificate's fingerprints for the IoT Core Registry
-// +listType=set
-Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
+	// Creation timestamp of the IoT Core Registry
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
 
-// Creation timestamp of the IoT Core Registry
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+	// Description of the IoT Core Registry
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Description of the IoT Core Registry
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Folder ID for the IoT Core Registry
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Folder ID for the IoT Core Registry
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// A set of key/value label pairs to assign to the IoT Core Registry.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the IoT Core Registry.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Options for logging for IoT Core Registry
+	LogOptions []CoreRegistryLogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"`
 
-// Options for logging for IoT Core Registry
-LogOptions []CoreRegistryLogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"`
+	// IoT Core Device name used to define registry
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// IoT Core Device name used to define registry
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
 }
 
-
 type CoreRegistryParameters struct {
+	// A set of certificate's fingerprints for the IoT Core Registry
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
 
-// A set of certificate's fingerprints for the IoT Core Registry
-// +kubebuilder:validation:Optional
-// +listType=set
-Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`
+	// Description of the IoT Core Registry
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Description of the IoT Core Registry
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Folder ID for the IoT Core Registry
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Folder ID for the IoT Core Registry
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
 
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
 
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// A set of key/value label pairs to assign to the IoT Core Registry.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the IoT Core Registry.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Options for logging for IoT Core Registry
+	// +kubebuilder:validation:Optional
+	LogOptions []CoreRegistryLogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"`
 
-// Options for logging for IoT Core Registry
-// +kubebuilder:validation:Optional
-LogOptions []CoreRegistryLogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"`
+	// IoT Core Device name used to define registry
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// IoT Core Device name used to define registry
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// A set of passwords's id for the IoT Core Registry
-// +kubebuilder:validation:Optional
-PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"`
+	// A set of passwords's id for the IoT Core Registry
+	// +kubebuilder:validation:Optional
+	PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"`
 }
 
 // CoreRegistrySpec defines the desired state of CoreRegistry
 type CoreRegistrySpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider CoreRegistryParameters `json:"forProvider"`
+	ForProvider CoreRegistryParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -193,20 +176,19 @@ type CoreRegistrySpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider CoreRegistryInitParameters `json:"initProvider,omitempty"`
+	InitProvider CoreRegistryInitParameters `json:"initProvider,omitempty"`
 }
 
 // CoreRegistryStatus defines the observed state of CoreRegistry.
 type CoreRegistryStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider CoreRegistryObservation `json:"atProvider,omitempty"`
+	AtProvider CoreRegistryObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // CoreRegistry is the Schema for the CoreRegistrys API. Allows management of a Yandex.Cloud IoT Core Registry.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
@@ -216,9 +198,9 @@ type CoreRegistryStatus struct {
 type CoreRegistry struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
-	Spec CoreRegistrySpec `json:"spec"`
-	Status CoreRegistryStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	Spec   CoreRegistrySpec   `json:"spec"`
+	Status CoreRegistryStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/apis/iot/v1alpha1/zz_generated.conversion_hubs.go b/apis/iot/v1alpha1/zz_generated.conversion_hubs.go
index 82c3f94..ff0317a 100755
--- a/apis/iot/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/iot/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,16 +1,12 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
+// Hub marks this type as a conversion hub.
+func (tr *CoreBroker) Hub() {}
 
- // Hub marks this type as a conversion hub.
- func (tr *CoreBroker) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *CoreDevice) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *CoreRegistry) Hub() {}
+// Hub marks this type as a conversion hub.
+func (tr *CoreDevice) Hub() {}
 
+// Hub marks this type as a conversion hub.
+func (tr *CoreRegistry) Hub() {}
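The Hub() markers above plug into controller-runtime's conversion machinery: with a single served version, each kind is its own conversion hub, and any future API version would implement conversion.Convertible against it. A sketch of how the contract lines up if placed alongside these generated files; the spoke shown in the comment is hypothetical:

package v1alpha1

import "sigs.k8s.io/controller-runtime/pkg/conversion"

// Compile-time checks that the generated Hub() methods satisfy the
// conversion.Hub contract used by conversion webhooks.
var (
	_ conversion.Hub = &CoreBroker{}
	_ conversion.Hub = &CoreDevice{}
	_ conversion.Hub = &CoreRegistry{}
)

// A hypothetical future spoke version would convert through the hub,
// implementing conversion.Convertible on its own type, roughly:
//
//	func (dst *CoreDevice) ConvertFrom(src conversion.Hub) error { ... }
//	func (src *CoreDevice) ConvertTo(dst conversion.Hub) error { ... }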
diff --git a/apis/iot/v1alpha1/zz_generated.deepcopy.go b/apis/iot/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..87e9e6d
--- /dev/null
+++ b/apis/iot/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1167 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBroker) DeepCopyInto(out *CoreBroker) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBroker.
+func (in *CoreBroker) DeepCopy() *CoreBroker {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBroker)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CoreBroker) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBrokerInitParameters) DeepCopyInto(out *CoreBrokerInitParameters) {
+	*out = *in
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]LogOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerInitParameters.
+func (in *CoreBrokerInitParameters) DeepCopy() *CoreBrokerInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBrokerInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBrokerList) DeepCopyInto(out *CoreBrokerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CoreBroker, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerList.
+func (in *CoreBrokerList) DeepCopy() *CoreBrokerList {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBrokerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CoreBrokerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBrokerObservation) DeepCopyInto(out *CoreBrokerObservation) {
+	*out = *in
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]LogOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerObservation.
+func (in *CoreBrokerObservation) DeepCopy() *CoreBrokerObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBrokerObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBrokerParameters) DeepCopyInto(out *CoreBrokerParameters) {
+	*out = *in
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]LogOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerParameters.
+func (in *CoreBrokerParameters) DeepCopy() *CoreBrokerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBrokerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBrokerSpec) DeepCopyInto(out *CoreBrokerSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerSpec.
+func (in *CoreBrokerSpec) DeepCopy() *CoreBrokerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBrokerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreBrokerStatus) DeepCopyInto(out *CoreBrokerStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerStatus.
+func (in *CoreBrokerStatus) DeepCopy() *CoreBrokerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreBrokerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDevice) DeepCopyInto(out *CoreDevice) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDevice.
+func (in *CoreDevice) DeepCopy() *CoreDevice {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDevice)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CoreDevice) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDeviceInitParameters) DeepCopyInto(out *CoreDeviceInitParameters) {
+	*out = *in
+	if in.Aliases != nil {
+		in, out := &in.Aliases, &out.Aliases
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Passwords != nil {
+		in, out := &in.Passwords, &out.Passwords
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.RegistryID != nil {
+		in, out := &in.RegistryID, &out.RegistryID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceInitParameters.
+func (in *CoreDeviceInitParameters) DeepCopy() *CoreDeviceInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDeviceInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDeviceList) DeepCopyInto(out *CoreDeviceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CoreDevice, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceList.
+func (in *CoreDeviceList) DeepCopy() *CoreDeviceList {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDeviceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CoreDeviceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDeviceObservation) DeepCopyInto(out *CoreDeviceObservation) {
+	*out = *in
+	if in.Aliases != nil {
+		in, out := &in.Aliases, &out.Aliases
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.RegistryID != nil {
+		in, out := &in.RegistryID, &out.RegistryID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceObservation.
+func (in *CoreDeviceObservation) DeepCopy() *CoreDeviceObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDeviceObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDeviceParameters) DeepCopyInto(out *CoreDeviceParameters) {
+	*out = *in
+	if in.Aliases != nil {
+		in, out := &in.Aliases, &out.Aliases
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.PasswordsSecretRef != nil {
+		in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef
+		*out = new([]v1.SecretKeySelector)
+		if **in != nil {
+			in, out := *in, *out
+			*out = make([]v1.SecretKeySelector, len(*in))
+			copy(*out, *in)
+		}
+	}
+	if in.RegistryID != nil {
+		in, out := &in.RegistryID, &out.RegistryID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceParameters.
+func (in *CoreDeviceParameters) DeepCopy() *CoreDeviceParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDeviceParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDeviceSpec) DeepCopyInto(out *CoreDeviceSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceSpec.
+func (in *CoreDeviceSpec) DeepCopy() *CoreDeviceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDeviceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreDeviceStatus) DeepCopyInto(out *CoreDeviceStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceStatus.
+func (in *CoreDeviceStatus) DeepCopy() *CoreDeviceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreDeviceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistry) DeepCopyInto(out *CoreRegistry) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistry.
+func (in *CoreRegistry) DeepCopy() *CoreRegistry {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CoreRegistry) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistryInitParameters) DeepCopyInto(out *CoreRegistryInitParameters) {
+	*out = *in
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]CoreRegistryLogOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Passwords != nil {
+		in, out := &in.Passwords, &out.Passwords
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryInitParameters.
+func (in *CoreRegistryInitParameters) DeepCopy() *CoreRegistryInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistryInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistryList) DeepCopyInto(out *CoreRegistryList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CoreRegistry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryList.
+func (in *CoreRegistryList) DeepCopy() *CoreRegistryList {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistryList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CoreRegistryList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistryLogOptionsInitParameters) DeepCopyInto(out *CoreRegistryLogOptionsInitParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinLevel != nil {
+		in, out := &in.MinLevel, &out.MinLevel
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryLogOptionsInitParameters.
+func (in *CoreRegistryLogOptionsInitParameters) DeepCopy() *CoreRegistryLogOptionsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistryLogOptionsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistryLogOptionsObservation) DeepCopyInto(out *CoreRegistryLogOptionsObservation) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinLevel != nil {
+		in, out := &in.MinLevel, &out.MinLevel
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryLogOptionsObservation.
+func (in *CoreRegistryLogOptionsObservation) DeepCopy() *CoreRegistryLogOptionsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistryLogOptionsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistryLogOptionsParameters) DeepCopyInto(out *CoreRegistryLogOptionsParameters) {
+	*out = *in
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogGroupID != nil {
+		in, out := &in.LogGroupID, &out.LogGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinLevel != nil {
+		in, out := &in.MinLevel, &out.MinLevel
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryLogOptionsParameters.
+func (in *CoreRegistryLogOptionsParameters) DeepCopy() *CoreRegistryLogOptionsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistryLogOptionsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreRegistryObservation) DeepCopyInto(out *CoreRegistryObservation) {
+	*out = *in
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.LogOptions != nil {
+		in, out := &in.LogOptions, &out.LogOptions
+		*out = make([]CoreRegistryLogOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryObservation.
+func (in *CoreRegistryObservation) DeepCopy() *CoreRegistryObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreRegistryObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+func (in *CoreRegistryParameters) DeepCopyInto(out *CoreRegistryParameters) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]CoreRegistryLogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordsSecretRef != nil { + in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef + *out = new([]v1.SecretKeySelector) + if **in != nil { + in, out := *in, *out + *out = make([]v1.SecretKeySelector, len(*in)) + copy(*out, *in) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryParameters. +func (in *CoreRegistryParameters) DeepCopy() *CoreRegistryParameters { + if in == nil { + return nil + } + out := new(CoreRegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistrySpec) DeepCopyInto(out *CoreRegistrySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistrySpec. +func (in *CoreRegistrySpec) DeepCopy() *CoreRegistrySpec { + if in == nil { + return nil + } + out := new(CoreRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistryStatus) DeepCopyInto(out *CoreRegistryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryStatus. +func (in *CoreRegistryStatus) DeepCopy() *CoreRegistryStatus { + if in == nil { + return nil + } + out := new(CoreRegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/iot/v1alpha1/zz_generated.resolvers.go b/apis/iot/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..ec47e67 --- /dev/null +++ b/apis/iot/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,95 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this CoreBroker. 
+func (mg *CoreBroker) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CoreRegistry. +func (mg *CoreRegistry) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/iot/v1alpha1/zz_groupversion_info.go b/apis/iot/v1alpha1/zz_groupversion_info.go index 705398d..d7367eb 100755 --- a/apis/iot/v1alpha1/zz_groupversion_info.go +++ b/apis/iot/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/kms/v1alpha1/zz_asymmetricencryptionkey_terraformed.go b/apis/kms/v1alpha1/zz_asymmetricencryptionkey_terraformed.go new file mode 100755 index 0000000..cc6c421 --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricencryptionkey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
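Note: the generated ResolveReferences above is what lets a manifest name a managed Folder instead of hard-coding folder_id; the resolver reads the reference (or selector), extracts the Folder's crossplane.io/external-name, and writes the result into both spec.forProvider.folderId and spec.initProvider.folderId. A minimal sketch of driving it by hand, outside a reconciler (the object name "my-folder" and the helper are illustrative, not part of the generated code):

```go
package notes

import (
	"context"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	iot "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1"
)

// resolveFolderID fills mg.Spec.ForProvider.FolderID from a named Folder.
// The reference is by Kubernetes object name; the resolved value is the
// Folder's external name, i.e. its Yandex Cloud folder ID.
func resolveFolderID(ctx context.Context, c client.Reader, mg *iot.CoreRegistry) error {
	mg.Spec.ForProvider.FolderIDRef = &xpv1.Reference{Name: "my-folder"} // hypothetical object name
	return mg.ResolveReferences(ctx, c)                                 // generated resolver shown above
}
```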
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AsymmetricEncryptionKey +func (mg *AsymmetricEncryptionKey) GetTerraformResourceType() string { + return "yandex_kms_asymmetric_encryption_key" +} + +// GetConnectionDetailsMapping for this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AsymmetricEncryptionKey +func (tr *AsymmetricEncryptionKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AsymmetricEncryptionKey using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *AsymmetricEncryptionKey) LateInitialize(attrs []byte) (bool, error) { + params := &AsymmetricEncryptionKeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AsymmetricEncryptionKey) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/kms/v1alpha1/zz_asymmetricencryptionkey_types.go b/apis/kms/v1alpha1/zz_asymmetricencryptionkey_types.go new file mode 100755 index 0000000..a535571 --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricencryptionkey_types.go @@ -0,0 +1,165 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AsymmetricEncryptionKeyInitParameters struct { + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Encryption algorithm to be used with a new key. The default value is RSA_2048_ENC_OAEP_SHA_256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the key. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AsymmetricEncryptionKeyObservation struct { + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Encryption algorithm to be used with a new key. The default value is RSA_2048_ENC_OAEP_SHA_256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the key. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The status of the key. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type AsymmetricEncryptionKeyParameters struct { + + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Encryption algorithm to be used with a new key. The default value is RSA_2048_ENC_OAEP_SHA_256. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the key. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// AsymmetricEncryptionKeySpec defines the desired state of AsymmetricEncryptionKey +type AsymmetricEncryptionKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AsymmetricEncryptionKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not want to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider AsymmetricEncryptionKeyInitParameters `json:"initProvider,omitempty"` +} + +// AsymmetricEncryptionKeyStatus defines the observed state of AsymmetricEncryptionKey. +type AsymmetricEncryptionKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AsymmetricEncryptionKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// AsymmetricEncryptionKey is the Schema for the AsymmetricEncryptionKeys API. Creates a Yandex KMS asymmetric encryption key that can be used for cryptographic operations.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type AsymmetricEncryptionKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AsymmetricEncryptionKeySpec `json:"spec"` + Status AsymmetricEncryptionKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AsymmetricEncryptionKeyList contains a list of AsymmetricEncryptionKeys +type AsymmetricEncryptionKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AsymmetricEncryptionKey `json:"items"` +} + +// Repository type metadata. +var ( + AsymmetricEncryptionKey_Kind = "AsymmetricEncryptionKey" + AsymmetricEncryptionKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AsymmetricEncryptionKey_Kind}.String() + AsymmetricEncryptionKey_KindAPIVersion = AsymmetricEncryptionKey_Kind + "." + CRDGroupVersion.String() + AsymmetricEncryptionKey_GroupVersionKind = CRDGroupVersion.WithKind(AsymmetricEncryptionKey_Kind) +) + +func init() { + SchemeBuilder.Register(&AsymmetricEncryptionKey{}, &AsymmetricEncryptionKeyList{}) +} diff --git a/apis/kms/v1alpha1/zz_asymmetricencryptionkeyiambinding_terraformed.go b/apis/kms/v1alpha1/zz_asymmetricencryptionkeyiambinding_terraformed.go new file mode 100755 index 0000000..b12fabd --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricencryptionkeyiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
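Note on the GetMergedParameters implementations in the terraformed files on either side of this point: they all hinge on one mergo call with deliberately twisted options. A standalone sketch of the semantics (the map values are invented): forProvider wins, initProvider only fills gaps, and WithSliceDeepCopy, which implicitly turns overwriting on, is immediately reined back in by resetting Overwrite:

```go
package notes

import (
	"fmt"

	"dario.cat/mergo"
)

// MergeDemo mirrors the generated GetMergedParameters: initParams may add
// keys, but must not replace anything params already carries.
func MergeDemo() {
	params := map[string]any{"name": "from-forProvider"}
	initParams := map[string]any{"name": "ignored", "description": "filled-from-initProvider"}

	// WithSliceDeepCopy deep-copies slice values but also flips Overwrite on,
	// so the generated code switches Overwrite back off via a config option.
	_ = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})

	fmt.Println(params["name"])        // from-forProvider (kept)
	fmt.Println(params["description"]) // filled-from-initProvider (gap filled)
}
```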
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AsymmetricEncryptionKeyIAMBinding +func (mg *AsymmetricEncryptionKeyIAMBinding) GetTerraformResourceType() string { + return "yandex_kms_asymmetric_encryption_key_iam_binding" +} + +// GetConnectionDetailsMapping for this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AsymmetricEncryptionKeyIAMBinding +func (tr *AsymmetricEncryptionKeyIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AsymmetricEncryptionKeyIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AsymmetricEncryptionKeyIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &AsymmetricEncryptionKeyIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AsymmetricEncryptionKeyIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kms/v1alpha1/zz_asymmetricencryptionkeyiambinding_types.go b/apis/kms/v1alpha1/zz_asymmetricencryptionkeyiambinding_types.go new file mode 100755 index 0000000..ebbbc07 --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricencryptionkeyiambinding_types.go @@ -0,0 +1,165 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AsymmetricEncryptionKeyIAMBindingInitParameters struct { + + // The Yandex Key Management Service Asymmetric Encryption Key ID to apply a binding to. + // +crossplane:generate:reference:type=AsymmetricEncryptionKey + AsymmetricEncryptionKeyID *string `json:"asymmetricEncryptionKeyId,omitempty" tf:"asymmetric_encryption_key_id,omitempty"` + + // Reference to a AsymmetricEncryptionKey to populate asymmetricEncryptionKeyId. + // +kubebuilder:validation:Optional + AsymmetricEncryptionKeyIDRef *v1.Reference `json:"asymmetricEncryptionKeyIdRef,omitempty" tf:"-"` + + // Selector for a AsymmetricEncryptionKey to populate asymmetricEncryptionKeyId. + // +kubebuilder:validation:Optional + AsymmetricEncryptionKeyIDSelector *v1.Selector `json:"asymmetricEncryptionKeyIdSelector,omitempty" tf:"-"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. 
+ // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type AsymmetricEncryptionKeyIAMBindingObservation struct { + + // The Yandex Key Management Service Asymmetric Encryption Key ID to apply a binding to. + AsymmetricEncryptionKeyID *string `json:"asymmetricEncryptionKeyId,omitempty" tf:"asymmetric_encryption_key_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type AsymmetricEncryptionKeyIAMBindingParameters struct { + + // The Yandex Key Management Service Asymmetric Encryption Key ID to apply a binding to. + // +crossplane:generate:reference:type=AsymmetricEncryptionKey + // +kubebuilder:validation:Optional + AsymmetricEncryptionKeyID *string `json:"asymmetricEncryptionKeyId,omitempty" tf:"asymmetric_encryption_key_id,omitempty"` + + // Reference to a AsymmetricEncryptionKey to populate asymmetricEncryptionKeyId. + // +kubebuilder:validation:Optional + AsymmetricEncryptionKeyIDRef *v1.Reference `json:"asymmetricEncryptionKeyIdRef,omitempty" tf:"-"` + + // Selector for a AsymmetricEncryptionKey to populate asymmetricEncryptionKeyId. + // +kubebuilder:validation:Optional + AsymmetricEncryptionKeyIDSelector *v1.Selector `json:"asymmetricEncryptionKeyIdSelector,omitempty" tf:"-"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// AsymmetricEncryptionKeyIAMBindingSpec defines the desired state of AsymmetricEncryptionKeyIAMBinding +type AsymmetricEncryptionKeyIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AsymmetricEncryptionKeyIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not want to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider AsymmetricEncryptionKeyIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// AsymmetricEncryptionKeyIAMBindingStatus defines the observed state of AsymmetricEncryptionKeyIAMBinding. +type AsymmetricEncryptionKeyIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AsymmetricEncryptionKeyIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// AsymmetricEncryptionKeyIAMBinding is the Schema for the AsymmetricEncryptionKeyIAMBindings API. Allows management of a single IAM binding for a Yandex KMS asymmetric encryption key. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type AsymmetricEncryptionKeyIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec AsymmetricEncryptionKeyIAMBindingSpec `json:"spec"` + Status AsymmetricEncryptionKeyIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AsymmetricEncryptionKeyIAMBindingList contains a list of AsymmetricEncryptionKeyIAMBindings +type AsymmetricEncryptionKeyIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AsymmetricEncryptionKeyIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + AsymmetricEncryptionKeyIAMBinding_Kind = "AsymmetricEncryptionKeyIAMBinding" + AsymmetricEncryptionKeyIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AsymmetricEncryptionKeyIAMBinding_Kind}.String() + AsymmetricEncryptionKeyIAMBinding_KindAPIVersion = AsymmetricEncryptionKeyIAMBinding_Kind + "." + CRDGroupVersion.String() + AsymmetricEncryptionKeyIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(AsymmetricEncryptionKeyIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&AsymmetricEncryptionKeyIAMBinding{}, &AsymmetricEncryptionKeyIAMBindingList{}) +} diff --git a/apis/kms/v1alpha1/zz_asymmetricsignaturekey_terraformed.go b/apis/kms/v1alpha1/zz_asymmetricsignaturekey_terraformed.go new file mode 100755 index 0000000..9e62218 --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricsignaturekey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT.
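A usage sketch for the IAM binding kind just defined. Two things to note: the CEL rule on spec makes role mandatory whenever Create/Update management policies are active, and the member format below is my assumption from the usual Yandex IAM convention (serviceAccount:&lt;id&gt;), since the generated comment's list of accepted values was cut off by the doc importer; the role name is likewise illustrative:

```go
package notes

import (
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	kms "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1"
)

func strPtr(s string) *string { return &s }

// NewEncryptDecryptBinding grants one service account use of a key that is
// itself managed in-cluster. The key ID is resolved from the referenced
// AsymmetricEncryptionKey's external name, just like FolderIDRef above.
func NewEncryptDecryptBinding() *kms.AsymmetricEncryptionKeyIAMBinding {
	b := &kms.AsymmetricEncryptionKeyIAMBinding{}
	b.Spec.ForProvider = kms.AsymmetricEncryptionKeyIAMBindingParameters{
		AsymmetricEncryptionKeyIDRef: &xpv1.Reference{Name: "my-key"}, // hypothetical object name
		Role:    strPtr("kms.keys.encrypterDecrypter"),                // illustrative role
		Members: []*string{strPtr("serviceAccount:aje0example0sa0id")}, // assumed member format
	}
	return b
}
```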
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AsymmetricSignatureKey +func (mg *AsymmetricSignatureKey) GetTerraformResourceType() string { + return "yandex_kms_asymmetric_signature_key" +} + +// GetConnectionDetailsMapping for this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AsymmetricSignatureKey +func (tr *AsymmetricSignatureKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AsymmetricSignatureKey using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *AsymmetricSignatureKey) LateInitialize(attrs []byte) (bool, error) { + params := &AsymmetricSignatureKeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AsymmetricSignatureKey) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/kms/v1alpha1/zz_asymmetricsignaturekey_types.go b/apis/kms/v1alpha1/zz_asymmetricsignaturekey_types.go new file mode 100755 index 0000000..a82ba60 --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricsignaturekey_types.go @@ -0,0 +1,165 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AsymmetricSignatureKeyInitParameters struct { + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the key. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Signature algorithm to be used with a new key. The default value is RSA_2048_SIGN_PSS_SHA_256. + SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" tf:"signature_algorithm,omitempty"` +} + +type AsymmetricSignatureKeyObservation struct { + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the key. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Signature algorithm to be used with a new key. The default value is RSA_2048_SIGN_PSS_SHA_256. 
+ SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" tf:"signature_algorithm,omitempty"` + + // The status of the key. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type AsymmetricSignatureKeyParameters struct { + + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the key. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Signature algorithm to be used with a new key. The default value is RSA_2048_SIGN_PSS_SHA_256. + // +kubebuilder:validation:Optional + SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" tf:"signature_algorithm,omitempty"` +} + +// AsymmetricSignatureKeySpec defines the desired state of AsymmetricSignatureKey +type AsymmetricSignatureKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AsymmetricSignatureKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not want to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider AsymmetricSignatureKeyInitParameters `json:"initProvider,omitempty"` +} + +// AsymmetricSignatureKeyStatus defines the observed state of AsymmetricSignatureKey. +type AsymmetricSignatureKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AsymmetricSignatureKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// AsymmetricSignatureKey is the Schema for the AsymmetricSignatureKeys API. Creates a Yandex KMS asymmetric signature key that can be used for cryptographic operations.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type AsymmetricSignatureKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AsymmetricSignatureKeySpec `json:"spec"` + Status AsymmetricSignatureKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AsymmetricSignatureKeyList contains a list of AsymmetricSignatureKeys +type AsymmetricSignatureKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AsymmetricSignatureKey `json:"items"` +} + +// Repository type metadata. +var ( + AsymmetricSignatureKey_Kind = "AsymmetricSignatureKey" + AsymmetricSignatureKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AsymmetricSignatureKey_Kind}.String() + AsymmetricSignatureKey_KindAPIVersion = AsymmetricSignatureKey_Kind + "." + CRDGroupVersion.String() + AsymmetricSignatureKey_GroupVersionKind = CRDGroupVersion.WithKind(AsymmetricSignatureKey_Kind) +) + +func init() { + SchemeBuilder.Register(&AsymmetricSignatureKey{}, &AsymmetricSignatureKeyList{}) +} diff --git a/apis/kms/v1alpha1/zz_asymmetricsignaturekeyiambinding_terraformed.go b/apis/kms/v1alpha1/zz_asymmetricsignaturekeyiambinding_terraformed.go new file mode 100755 index 0000000..c1bb625 --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricsignaturekeyiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
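The terraformed files around this point all end in the same LateInitialize hook. What it does, in short: parse the observed Terraform state into a Parameters struct, then copy values into any spec.forProvider fields that are still nil, reporting whether anything changed. A condensed sketch using the signature-key variant (the attrs JSON is invented):

```go
package notes

import (
	"fmt"

	"github.com/crossplane/upjet/pkg/resource"
	"github.com/crossplane/upjet/pkg/resource/json"

	kms "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1"
)

// LateInitDemo fills unset spec fields of a key from observed tfstate attrs.
func LateInitDemo(tr *kms.AsymmetricSignatureKey) (bool, error) {
	attrs := []byte(`{"signature_algorithm":"RSA_2048_SIGN_PSS_SHA_256"}`) // invented state
	params := &kms.AsymmetricSignatureKeyParameters{}
	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
		return false, err
	}
	// The zero-value filter keeps empty state values from clobbering the spec.
	li := resource.NewGenericLateInitializer(resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard))
	changed, err := li.LateInitialize(&tr.Spec.ForProvider, params)
	fmt.Println("spec changed:", changed) // true only if a nil field was filled
	return changed, err
}
```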
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AsymmetricSignatureKeyIAMBinding +func (mg *AsymmetricSignatureKeyIAMBinding) GetTerraformResourceType() string { + return "yandex_kms_asymmetric_signature_key_iam_binding" +} + +// GetConnectionDetailsMapping for this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AsymmetricSignatureKeyIAMBinding +func (tr *AsymmetricSignatureKeyIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AsymmetricSignatureKeyIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AsymmetricSignatureKeyIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &AsymmetricSignatureKeyIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AsymmetricSignatureKeyIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kms/v1alpha1/zz_asymmetricsignaturekeyiambinding_types.go b/apis/kms/v1alpha1/zz_asymmetricsignaturekeyiambinding_types.go new file mode 100755 index 0000000..4941a0a --- /dev/null +++ b/apis/kms/v1alpha1/zz_asymmetricsignaturekeyiambinding_types.go @@ -0,0 +1,165 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AsymmetricSignatureKeyIAMBindingInitParameters struct { + + // The Yandex Key Management Service Asymmetric Signature Key ID to apply a binding to. + // +crossplane:generate:reference:type=AsymmetricSignatureKey + AsymmetricSignatureKeyID *string `json:"asymmetricSignatureKeyId,omitempty" tf:"asymmetric_signature_key_id,omitempty"` + + // Reference to a AsymmetricSignatureKey to populate asymmetricSignatureKeyId. + // +kubebuilder:validation:Optional + AsymmetricSignatureKeyIDRef *v1.Reference `json:"asymmetricSignatureKeyIdRef,omitempty" tf:"-"` + + // Selector for a AsymmetricSignatureKey to populate asymmetricSignatureKeyId. + // +kubebuilder:validation:Optional + AsymmetricSignatureKeyIDSelector *v1.Selector `json:"asymmetricSignatureKeyIdSelector,omitempty" tf:"-"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. 
+ // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type AsymmetricSignatureKeyIAMBindingObservation struct { + + // The Yandex Key Management Service Asymmetric Signature Key ID to apply a binding to. + AsymmetricSignatureKeyID *string `json:"asymmetricSignatureKeyId,omitempty" tf:"asymmetric_signature_key_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type AsymmetricSignatureKeyIAMBindingParameters struct { + + // The Yandex Key Management Service Asymmetric Signature Key ID to apply a binding to. + // +crossplane:generate:reference:type=AsymmetricSignatureKey + // +kubebuilder:validation:Optional + AsymmetricSignatureKeyID *string `json:"asymmetricSignatureKeyId,omitempty" tf:"asymmetric_signature_key_id,omitempty"` + + // Reference to a AsymmetricSignatureKey to populate asymmetricSignatureKeyId. + // +kubebuilder:validation:Optional + AsymmetricSignatureKeyIDRef *v1.Reference `json:"asymmetricSignatureKeyIdRef,omitempty" tf:"-"` + + // Selector for a AsymmetricSignatureKey to populate asymmetricSignatureKeyId. + // +kubebuilder:validation:Optional + AsymmetricSignatureKeyIDSelector *v1.Selector `json:"asymmetricSignatureKeyIdSelector,omitempty" tf:"-"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// AsymmetricSignatureKeyIAMBindingSpec defines the desired state of AsymmetricSignatureKeyIAMBinding +type AsymmetricSignatureKeyIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AsymmetricSignatureKeyIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AsymmetricSignatureKeyIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// AsymmetricSignatureKeyIAMBindingStatus defines the observed state of AsymmetricSignatureKeyIAMBinding. +type AsymmetricSignatureKeyIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AsymmetricSignatureKeyIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// AsymmetricSignatureKeyIAMBinding is the Schema for the AsymmetricSignatureKeyIAMBindings API. Allows management of a single IAM binding for a +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type AsymmetricSignatureKeyIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec AsymmetricSignatureKeyIAMBindingSpec `json:"spec"` + Status AsymmetricSignatureKeyIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AsymmetricSignatureKeyIAMBindingList contains a list of AsymmetricSignatureKeyIAMBindings +type AsymmetricSignatureKeyIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AsymmetricSignatureKeyIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + AsymmetricSignatureKeyIAMBinding_Kind = "AsymmetricSignatureKeyIAMBinding" + AsymmetricSignatureKeyIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AsymmetricSignatureKeyIAMBinding_Kind}.String() + AsymmetricSignatureKeyIAMBinding_KindAPIVersion = AsymmetricSignatureKeyIAMBinding_Kind + "." + CRDGroupVersion.String() + AsymmetricSignatureKeyIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(AsymmetricSignatureKeyIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&AsymmetricSignatureKeyIAMBinding{}, &AsymmetricSignatureKeyIAMBindingList{}) +} diff --git a/apis/kms/v1alpha1/zz_generated.conversion_hubs.go b/apis/kms/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..9b62b8a --- /dev/null +++ b/apis/kms/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,24 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. 
+func (tr *AsymmetricEncryptionKey) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AsymmetricEncryptionKeyIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AsymmetricSignatureKey) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AsymmetricSignatureKeyIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SecretCiphertext) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SymmetricKey) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SymmetricKeyIAMBinding) Hub() {} diff --git a/apis/kms/v1alpha1/zz_generated.deepcopy.go b/apis/kms/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..d538b90 --- /dev/null +++ b/apis/kms/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1920 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKey) DeepCopyInto(out *AsymmetricEncryptionKey) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKey. +func (in *AsymmetricEncryptionKey) DeepCopy() *AsymmetricEncryptionKey { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricEncryptionKey) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyIAMBinding) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBinding. +func (in *AsymmetricEncryptionKeyIAMBinding) DeepCopy() *AsymmetricEncryptionKeyIAMBinding { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricEncryptionKeyIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricEncryptionKeyIAMBindingInitParameters) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBindingInitParameters) { + *out = *in + if in.AsymmetricEncryptionKeyID != nil { + in, out := &in.AsymmetricEncryptionKeyID, &out.AsymmetricEncryptionKeyID + *out = new(string) + **out = **in + } + if in.AsymmetricEncryptionKeyIDRef != nil { + in, out := &in.AsymmetricEncryptionKeyIDRef, &out.AsymmetricEncryptionKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AsymmetricEncryptionKeyIDSelector != nil { + in, out := &in.AsymmetricEncryptionKeyIDSelector, &out.AsymmetricEncryptionKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBindingInitParameters. +func (in *AsymmetricEncryptionKeyIAMBindingInitParameters) DeepCopy() *AsymmetricEncryptionKeyIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyIAMBindingList) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AsymmetricEncryptionKeyIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBindingList. +func (in *AsymmetricEncryptionKeyIAMBindingList) DeepCopy() *AsymmetricEncryptionKeyIAMBindingList { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricEncryptionKeyIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricEncryptionKeyIAMBindingObservation) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBindingObservation) { + *out = *in + if in.AsymmetricEncryptionKeyID != nil { + in, out := &in.AsymmetricEncryptionKeyID, &out.AsymmetricEncryptionKeyID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBindingObservation. +func (in *AsymmetricEncryptionKeyIAMBindingObservation) DeepCopy() *AsymmetricEncryptionKeyIAMBindingObservation { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyIAMBindingParameters) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBindingParameters) { + *out = *in + if in.AsymmetricEncryptionKeyID != nil { + in, out := &in.AsymmetricEncryptionKeyID, &out.AsymmetricEncryptionKeyID + *out = new(string) + **out = **in + } + if in.AsymmetricEncryptionKeyIDRef != nil { + in, out := &in.AsymmetricEncryptionKeyIDRef, &out.AsymmetricEncryptionKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AsymmetricEncryptionKeyIDSelector != nil { + in, out := &in.AsymmetricEncryptionKeyIDSelector, &out.AsymmetricEncryptionKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBindingParameters. +func (in *AsymmetricEncryptionKeyIAMBindingParameters) DeepCopy() *AsymmetricEncryptionKeyIAMBindingParameters { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricEncryptionKeyIAMBindingSpec) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBindingSpec. +func (in *AsymmetricEncryptionKeyIAMBindingSpec) DeepCopy() *AsymmetricEncryptionKeyIAMBindingSpec { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyIAMBindingStatus) DeepCopyInto(out *AsymmetricEncryptionKeyIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyIAMBindingStatus. +func (in *AsymmetricEncryptionKeyIAMBindingStatus) DeepCopy() *AsymmetricEncryptionKeyIAMBindingStatus { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyInitParameters) DeepCopyInto(out *AsymmetricEncryptionKeyInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyInitParameters. +func (in *AsymmetricEncryptionKeyInitParameters) DeepCopy() *AsymmetricEncryptionKeyInitParameters { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricEncryptionKeyList) DeepCopyInto(out *AsymmetricEncryptionKeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AsymmetricEncryptionKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyList. +func (in *AsymmetricEncryptionKeyList) DeepCopy() *AsymmetricEncryptionKeyList { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricEncryptionKeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyObservation) DeepCopyInto(out *AsymmetricEncryptionKeyObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyObservation. +func (in *AsymmetricEncryptionKeyObservation) DeepCopy() *AsymmetricEncryptionKeyObservation { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricEncryptionKeyParameters) DeepCopyInto(out *AsymmetricEncryptionKeyParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyParameters. +func (in *AsymmetricEncryptionKeyParameters) DeepCopy() *AsymmetricEncryptionKeyParameters { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeySpec) DeepCopyInto(out *AsymmetricEncryptionKeySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeySpec. +func (in *AsymmetricEncryptionKeySpec) DeepCopy() *AsymmetricEncryptionKeySpec { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricEncryptionKeyStatus) DeepCopyInto(out *AsymmetricEncryptionKeyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricEncryptionKeyStatus. +func (in *AsymmetricEncryptionKeyStatus) DeepCopy() *AsymmetricEncryptionKeyStatus { + if in == nil { + return nil + } + out := new(AsymmetricEncryptionKeyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKey) DeepCopyInto(out *AsymmetricSignatureKey) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKey. 
+func (in *AsymmetricSignatureKey) DeepCopy() *AsymmetricSignatureKey { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricSignatureKey) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyIAMBinding) DeepCopyInto(out *AsymmetricSignatureKeyIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBinding. +func (in *AsymmetricSignatureKeyIAMBinding) DeepCopy() *AsymmetricSignatureKeyIAMBinding { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricSignatureKeyIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyIAMBindingInitParameters) DeepCopyInto(out *AsymmetricSignatureKeyIAMBindingInitParameters) { + *out = *in + if in.AsymmetricSignatureKeyID != nil { + in, out := &in.AsymmetricSignatureKeyID, &out.AsymmetricSignatureKeyID + *out = new(string) + **out = **in + } + if in.AsymmetricSignatureKeyIDRef != nil { + in, out := &in.AsymmetricSignatureKeyIDRef, &out.AsymmetricSignatureKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AsymmetricSignatureKeyIDSelector != nil { + in, out := &in.AsymmetricSignatureKeyIDSelector, &out.AsymmetricSignatureKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBindingInitParameters. +func (in *AsymmetricSignatureKeyIAMBindingInitParameters) DeepCopy() *AsymmetricSignatureKeyIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricSignatureKeyIAMBindingList) DeepCopyInto(out *AsymmetricSignatureKeyIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AsymmetricSignatureKeyIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBindingList. +func (in *AsymmetricSignatureKeyIAMBindingList) DeepCopy() *AsymmetricSignatureKeyIAMBindingList { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricSignatureKeyIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyIAMBindingObservation) DeepCopyInto(out *AsymmetricSignatureKeyIAMBindingObservation) { + *out = *in + if in.AsymmetricSignatureKeyID != nil { + in, out := &in.AsymmetricSignatureKeyID, &out.AsymmetricSignatureKeyID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBindingObservation. +func (in *AsymmetricSignatureKeyIAMBindingObservation) DeepCopy() *AsymmetricSignatureKeyIAMBindingObservation { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricSignatureKeyIAMBindingParameters) DeepCopyInto(out *AsymmetricSignatureKeyIAMBindingParameters) { + *out = *in + if in.AsymmetricSignatureKeyID != nil { + in, out := &in.AsymmetricSignatureKeyID, &out.AsymmetricSignatureKeyID + *out = new(string) + **out = **in + } + if in.AsymmetricSignatureKeyIDRef != nil { + in, out := &in.AsymmetricSignatureKeyIDRef, &out.AsymmetricSignatureKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AsymmetricSignatureKeyIDSelector != nil { + in, out := &in.AsymmetricSignatureKeyIDSelector, &out.AsymmetricSignatureKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBindingParameters. +func (in *AsymmetricSignatureKeyIAMBindingParameters) DeepCopy() *AsymmetricSignatureKeyIAMBindingParameters { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyIAMBindingSpec) DeepCopyInto(out *AsymmetricSignatureKeyIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBindingSpec. +func (in *AsymmetricSignatureKeyIAMBindingSpec) DeepCopy() *AsymmetricSignatureKeyIAMBindingSpec { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyIAMBindingStatus) DeepCopyInto(out *AsymmetricSignatureKeyIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyIAMBindingStatus. +func (in *AsymmetricSignatureKeyIAMBindingStatus) DeepCopy() *AsymmetricSignatureKeyIAMBindingStatus { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricSignatureKeyInitParameters) DeepCopyInto(out *AsymmetricSignatureKeyInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignatureAlgorithm != nil { + in, out := &in.SignatureAlgorithm, &out.SignatureAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyInitParameters. +func (in *AsymmetricSignatureKeyInitParameters) DeepCopy() *AsymmetricSignatureKeyInitParameters { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyList) DeepCopyInto(out *AsymmetricSignatureKeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AsymmetricSignatureKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyList. +func (in *AsymmetricSignatureKeyList) DeepCopy() *AsymmetricSignatureKeyList { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AsymmetricSignatureKeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsymmetricSignatureKeyObservation) DeepCopyInto(out *AsymmetricSignatureKeyObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignatureAlgorithm != nil { + in, out := &in.SignatureAlgorithm, &out.SignatureAlgorithm + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyObservation. +func (in *AsymmetricSignatureKeyObservation) DeepCopy() *AsymmetricSignatureKeyObservation { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyParameters) DeepCopyInto(out *AsymmetricSignatureKeyParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignatureAlgorithm != nil { + in, out := &in.SignatureAlgorithm, &out.SignatureAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyParameters. +func (in *AsymmetricSignatureKeyParameters) DeepCopy() *AsymmetricSignatureKeyParameters { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AsymmetricSignatureKeySpec) DeepCopyInto(out *AsymmetricSignatureKeySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeySpec. +func (in *AsymmetricSignatureKeySpec) DeepCopy() *AsymmetricSignatureKeySpec { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsymmetricSignatureKeyStatus) DeepCopyInto(out *AsymmetricSignatureKeyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsymmetricSignatureKeyStatus. +func (in *AsymmetricSignatureKeyStatus) DeepCopy() *AsymmetricSignatureKeyStatus { + if in == nil { + return nil + } + out := new(AsymmetricSignatureKeyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCiphertext) DeepCopyInto(out *SecretCiphertext) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertext. +func (in *SecretCiphertext) DeepCopy() *SecretCiphertext { + if in == nil { + return nil + } + out := new(SecretCiphertext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretCiphertext) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCiphertextInitParameters) DeepCopyInto(out *SecretCiphertextInitParameters) { + *out = *in + if in.AadContext != nil { + in, out := &in.AadContext, &out.AadContext + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + out.PlaintextSecretRef = in.PlaintextSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertextInitParameters. +func (in *SecretCiphertextInitParameters) DeepCopy() *SecretCiphertextInitParameters { + if in == nil { + return nil + } + out := new(SecretCiphertextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretCiphertextList) DeepCopyInto(out *SecretCiphertextList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretCiphertext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertextList. +func (in *SecretCiphertextList) DeepCopy() *SecretCiphertextList { + if in == nil { + return nil + } + out := new(SecretCiphertextList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretCiphertextList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCiphertextObservation) DeepCopyInto(out *SecretCiphertextObservation) { + *out = *in + if in.AadContext != nil { + in, out := &in.AadContext, &out.AadContext + *out = new(string) + **out = **in + } + if in.Ciphertext != nil { + in, out := &in.Ciphertext, &out.Ciphertext + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertextObservation. +func (in *SecretCiphertextObservation) DeepCopy() *SecretCiphertextObservation { + if in == nil { + return nil + } + out := new(SecretCiphertextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCiphertextParameters) DeepCopyInto(out *SecretCiphertextParameters) { + *out = *in + if in.AadContext != nil { + in, out := &in.AadContext, &out.AadContext + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + out.PlaintextSecretRef = in.PlaintextSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertextParameters. +func (in *SecretCiphertextParameters) DeepCopy() *SecretCiphertextParameters { + if in == nil { + return nil + } + out := new(SecretCiphertextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCiphertextSpec) DeepCopyInto(out *SecretCiphertextSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertextSpec. 
+func (in *SecretCiphertextSpec) DeepCopy() *SecretCiphertextSpec { + if in == nil { + return nil + } + out := new(SecretCiphertextSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCiphertextStatus) DeepCopyInto(out *SecretCiphertextStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCiphertextStatus. +func (in *SecretCiphertextStatus) DeepCopy() *SecretCiphertextStatus { + if in == nil { + return nil + } + out := new(SecretCiphertextStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKey) DeepCopyInto(out *SymmetricKey) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKey. +func (in *SymmetricKey) DeepCopy() *SymmetricKey { + if in == nil { + return nil + } + out := new(SymmetricKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SymmetricKey) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyIAMBinding) DeepCopyInto(out *SymmetricKeyIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBinding. +func (in *SymmetricKeyIAMBinding) DeepCopy() *SymmetricKeyIAMBinding { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SymmetricKeyIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SymmetricKeyIAMBindingInitParameters) DeepCopyInto(out *SymmetricKeyIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.SymmetricKeyID != nil { + in, out := &in.SymmetricKeyID, &out.SymmetricKeyID + *out = new(string) + **out = **in + } + if in.SymmetricKeyIDRef != nil { + in, out := &in.SymmetricKeyIDRef, &out.SymmetricKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SymmetricKeyIDSelector != nil { + in, out := &in.SymmetricKeyIDSelector, &out.SymmetricKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBindingInitParameters. +func (in *SymmetricKeyIAMBindingInitParameters) DeepCopy() *SymmetricKeyIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyIAMBindingList) DeepCopyInto(out *SymmetricKeyIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SymmetricKeyIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBindingList. +func (in *SymmetricKeyIAMBindingList) DeepCopy() *SymmetricKeyIAMBindingList { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SymmetricKeyIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SymmetricKeyIAMBindingObservation) DeepCopyInto(out *SymmetricKeyIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.SymmetricKeyID != nil { + in, out := &in.SymmetricKeyID, &out.SymmetricKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBindingObservation. +func (in *SymmetricKeyIAMBindingObservation) DeepCopy() *SymmetricKeyIAMBindingObservation { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyIAMBindingParameters) DeepCopyInto(out *SymmetricKeyIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.SymmetricKeyID != nil { + in, out := &in.SymmetricKeyID, &out.SymmetricKeyID + *out = new(string) + **out = **in + } + if in.SymmetricKeyIDRef != nil { + in, out := &in.SymmetricKeyIDRef, &out.SymmetricKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SymmetricKeyIDSelector != nil { + in, out := &in.SymmetricKeyIDSelector, &out.SymmetricKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBindingParameters. +func (in *SymmetricKeyIAMBindingParameters) DeepCopy() *SymmetricKeyIAMBindingParameters { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyIAMBindingSpec) DeepCopyInto(out *SymmetricKeyIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBindingSpec. 
+func (in *SymmetricKeyIAMBindingSpec) DeepCopy() *SymmetricKeyIAMBindingSpec { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyIAMBindingStatus) DeepCopyInto(out *SymmetricKeyIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyIAMBindingStatus. +func (in *SymmetricKeyIAMBindingStatus) DeepCopy() *SymmetricKeyIAMBindingStatus { + if in == nil { + return nil + } + out := new(SymmetricKeyIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyInitParameters) DeepCopyInto(out *SymmetricKeyInitParameters) { + *out = *in + if in.DefaultAlgorithm != nil { + in, out := &in.DefaultAlgorithm, &out.DefaultAlgorithm + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RotationPeriod != nil { + in, out := &in.RotationPeriod, &out.RotationPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyInitParameters. +func (in *SymmetricKeyInitParameters) DeepCopy() *SymmetricKeyInitParameters { + if in == nil { + return nil + } + out := new(SymmetricKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyList) DeepCopyInto(out *SymmetricKeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SymmetricKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyList. +func (in *SymmetricKeyList) DeepCopy() *SymmetricKeyList { + if in == nil { + return nil + } + out := new(SymmetricKeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SymmetricKeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyObservation) DeepCopyInto(out *SymmetricKeyObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DefaultAlgorithm != nil { + in, out := &in.DefaultAlgorithm, &out.DefaultAlgorithm + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RotatedAt != nil { + in, out := &in.RotatedAt, &out.RotatedAt + *out = new(string) + **out = **in + } + if in.RotationPeriod != nil { + in, out := &in.RotationPeriod, &out.RotationPeriod + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyObservation. +func (in *SymmetricKeyObservation) DeepCopy() *SymmetricKeyObservation { + if in == nil { + return nil + } + out := new(SymmetricKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SymmetricKeyParameters) DeepCopyInto(out *SymmetricKeyParameters) { + *out = *in + if in.DefaultAlgorithm != nil { + in, out := &in.DefaultAlgorithm, &out.DefaultAlgorithm + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RotationPeriod != nil { + in, out := &in.RotationPeriod, &out.RotationPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyParameters. +func (in *SymmetricKeyParameters) DeepCopy() *SymmetricKeyParameters { + if in == nil { + return nil + } + out := new(SymmetricKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeySpec) DeepCopyInto(out *SymmetricKeySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeySpec. +func (in *SymmetricKeySpec) DeepCopy() *SymmetricKeySpec { + if in == nil { + return nil + } + out := new(SymmetricKeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SymmetricKeyStatus) DeepCopyInto(out *SymmetricKeyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SymmetricKeyStatus. +func (in *SymmetricKeyStatus) DeepCopy() *SymmetricKeyStatus { + if in == nil { + return nil + } + out := new(SymmetricKeyStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kms/v1alpha1/zz_generated.resolvers.go b/apis/kms/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..b547ff0 --- /dev/null +++ b/apis/kms/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,406 @@ +// Code generated by angryjet. DO NOT EDIT. 
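+// Note(docs): a minimal usage sketch, not part of the generated API, with illustrative names only (ctx, kubeClient, key). The resolvers in this file let a manifest reference another managed resource by object name rather than by cloud ID; the managed reconciler calls ResolveReferences before creating the external resource: +// +// key := &SymmetricKey{} // fetched from the API server beforehand +// if err := key.ResolveReferences(ctx, kubeClient); err != nil { +// // target of a reference is missing or not ready; requeue and retry +// } +// // key.Spec.ForProvider.FolderID is now filled from FolderIDRef/FolderIDSelector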
+ +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + iam "github.com/tagesjump/provider-upjet-yc/config/iam" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this AsymmetricEncryptionKey. +func (mg *AsymmetricEncryptionKey) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this AsymmetricEncryptionKeyIAMBinding. 
+func (mg *AsymmetricEncryptionKeyIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AsymmetricEncryptionKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AsymmetricEncryptionKeyIDRef, + Selector: mg.Spec.ForProvider.AsymmetricEncryptionKeyIDSelector, + To: reference.To{ + List: &AsymmetricEncryptionKeyList{}, + Managed: &AsymmetricEncryptionKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AsymmetricEncryptionKeyID") + } + mg.Spec.ForProvider.AsymmetricEncryptionKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AsymmetricEncryptionKeyIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AsymmetricEncryptionKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AsymmetricEncryptionKeyIDRef, + Selector: mg.Spec.InitProvider.AsymmetricEncryptionKeyIDSelector, + To: reference.To{ + List: &AsymmetricEncryptionKeyList{}, + Managed: &AsymmetricEncryptionKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AsymmetricEncryptionKeyID") + } + mg.Spec.InitProvider.AsymmetricEncryptionKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AsymmetricEncryptionKeyIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this AsymmetricSignatureKey. 
+func (mg *AsymmetricSignatureKey) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this AsymmetricSignatureKeyIAMBinding. +func (mg *AsymmetricSignatureKeyIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AsymmetricSignatureKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AsymmetricSignatureKeyIDRef, + Selector: mg.Spec.ForProvider.AsymmetricSignatureKeyIDSelector, + To: reference.To{ + List: &AsymmetricSignatureKeyList{}, + Managed: &AsymmetricSignatureKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AsymmetricSignatureKeyID") + } + mg.Spec.ForProvider.AsymmetricSignatureKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AsymmetricSignatureKeyIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AsymmetricSignatureKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AsymmetricSignatureKeyIDRef, + Selector: mg.Spec.InitProvider.AsymmetricSignatureKeyIDSelector, + To: reference.To{ + List: &AsymmetricSignatureKeyList{}, + Managed: &AsymmetricSignatureKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AsymmetricSignatureKeyID") + } + mg.Spec.InitProvider.AsymmetricSignatureKeyID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AsymmetricSignatureKeyIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this SecretCiphertext. +func (mg *SecretCiphertext) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyIDRef, + Selector: mg.Spec.ForProvider.KeyIDSelector, + To: reference.To{ + List: &SymmetricKeyList{}, + Managed: &SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyID") + } + mg.Spec.ForProvider.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyIDRef, + Selector: mg.Spec.InitProvider.KeyIDSelector, + To: reference.To{ + List: &SymmetricKeyList{}, + Managed: &SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyID") + } + mg.Spec.InitProvider.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SymmetricKey. +func (mg *SymmetricKey) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SymmetricKeyIAMBinding. 
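+// Note(docs): like the other resolvers in this file, the method below resolves each reference twice, once for spec.forProvider and once for spec.initProvider, so references behave identically whichever block supplies them.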
+func (mg *SymmetricKeyIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SymmetricKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SymmetricKeyIDRef, + Selector: mg.Spec.ForProvider.SymmetricKeyIDSelector, + To: reference.To{ + List: &SymmetricKeyList{}, + Managed: &SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SymmetricKeyID") + } + mg.Spec.ForProvider.SymmetricKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SymmetricKeyIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha11.ServiceAccountList{}, + Managed: &v1alpha11.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SymmetricKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SymmetricKeyIDRef, + Selector: mg.Spec.InitProvider.SymmetricKeyIDSelector, + To: reference.To{ + List: &SymmetricKeyList{}, + Managed: &SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SymmetricKeyID") + } + mg.Spec.InitProvider.SymmetricKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SymmetricKeyIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/kms/v1alpha1/zz_groupversion_info.go b/apis/kms/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..2a7689f --- /dev/null +++ b/apis/kms/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kms.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "kms.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kms/v1alpha1/zz_secretciphertext_terraformed.go b/apis/kms/v1alpha1/zz_secretciphertext_terraformed.go new file mode 100755 index 0000000..3daffbb --- /dev/null +++ b/apis/kms/v1alpha1/zz_secretciphertext_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SecretCiphertext +func (mg *SecretCiphertext) GetTerraformResourceType() string { + return "yandex_kms_secret_ciphertext" +} + +// GetConnectionDetailsMapping for this SecretCiphertext +func (tr *SecretCiphertext) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"plaintext": "plaintextSecretRef"} +} + +// GetObservation of this SecretCiphertext +func (tr *SecretCiphertext) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SecretCiphertext +func (tr *SecretCiphertext) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SecretCiphertext +func (tr *SecretCiphertext) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SecretCiphertext +func (tr *SecretCiphertext) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SecretCiphertext +func (tr *SecretCiphertext) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SecretCiphertext +func (tr *SecretCiphertext) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SecretCiphertext +func (tr *SecretCiphertext) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, because we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SecretCiphertext using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SecretCiphertext) LateInitialize(attrs []byte) (bool, error) { + params := &SecretCiphertextParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SecretCiphertext) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kms/v1alpha1/zz_secretciphertext_types.go b/apis/kms/v1alpha1/zz_secretciphertext_types.go new file mode 100755 index 0000000..2fcecd9 --- /dev/null +++ b/apis/kms/v1alpha1/zz_secretciphertext_types.go @@ -0,0 +1,132 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SecretCiphertextInitParameters struct { + + // Additional authenticated data (AAD context), optional. If specified, this data will be required for decryption with the SymmetricDecryptRequest + AadContext *string `json:"aadContext,omitempty" tf:"aad_context,omitempty"` + + // ID of the symmetric KMS key to use for encryption. + // +crossplane:generate:reference:type=SymmetricKey + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a SymmetricKey to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a SymmetricKey to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` + + // Plaintext to be encrypted. + PlaintextSecretRef v1.SecretKeySelector `json:"plaintextSecretRef" tf:"-"` +} + +type SecretCiphertextObservation struct { + + // Additional authenticated data (AAD context), optional. If specified, this data will be required for decryption with the SymmetricDecryptRequest + AadContext *string `json:"aadContext,omitempty" tf:"aad_context,omitempty"` + + // Resulting ciphertext, encoded with "standard" base64 alphabet as defined in RFC 4648 section 4 + Ciphertext *string `json:"ciphertext,omitempty" tf:"ciphertext,omitempty"` + + // an identifier for the resource with format {key_id}/{ciphertext} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ID of the symmetric KMS key to use for encryption. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type SecretCiphertextParameters struct { + + // Additional authenticated data (AAD context), optional. 
If specified, this data will be required for decryption with the SymmetricDecryptRequest + // +kubebuilder:validation:Optional + AadContext *string `json:"aadContext,omitempty" tf:"aad_context,omitempty"` + + // ID of the symmetric KMS key to use for encryption. + // +crossplane:generate:reference:type=SymmetricKey + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a SymmetricKey to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a SymmetricKey to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` + + // Plaintext to be encrypted. + // +kubebuilder:validation:Optional + PlaintextSecretRef v1.SecretKeySelector `json:"plaintextSecretRef" tf:"-"` +} + +// SecretCiphertextSpec defines the desired state of SecretCiphertext +type SecretCiphertextSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SecretCiphertextParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider SecretCiphertextInitParameters `json:"initProvider,omitempty"` +} + +// SecretCiphertextStatus defines the observed state of SecretCiphertext. +type SecretCiphertextStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SecretCiphertextObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SecretCiphertext is the Schema for the SecretCiphertexts API. Encrypts given plaintext with the specified Yandex KMS key and provides access to the ciphertext. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SecretCiphertext struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.plaintextSecretRef)",message="spec.forProvider.plaintextSecretRef is a required parameter" + Spec SecretCiphertextSpec `json:"spec"` + Status SecretCiphertextStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SecretCiphertextList contains a list of SecretCiphertexts +type SecretCiphertextList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SecretCiphertext `json:"items"` +} + +// Repository type metadata. +var ( + SecretCiphertext_Kind = "SecretCiphertext" + SecretCiphertext_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SecretCiphertext_Kind}.String() + SecretCiphertext_KindAPIVersion = SecretCiphertext_Kind + "." + CRDGroupVersion.String() + SecretCiphertext_GroupVersionKind = CRDGroupVersion.WithKind(SecretCiphertext_Kind) +) + +func init() { + SchemeBuilder.Register(&SecretCiphertext{}, &SecretCiphertextList{}) +} diff --git a/apis/kms/v1alpha1/zz_symmetrickey_terraformed.go b/apis/kms/v1alpha1/zz_symmetrickey_terraformed.go new file mode 100755 index 0000000..cb0f50b --- /dev/null +++ b/apis/kms/v1alpha1/zz_symmetrickey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SymmetricKey +func (mg *SymmetricKey) GetTerraformResourceType() string { + return "yandex_kms_symmetric_key" +} + +// GetConnectionDetailsMapping for this SymmetricKey +func (tr *SymmetricKey) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SymmetricKey +func (tr *SymmetricKey) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SymmetricKey +func (tr *SymmetricKey) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SymmetricKey +func (tr *SymmetricKey) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SymmetricKey +func (tr *SymmetricKey) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SymmetricKey +func (tr *SymmetricKey) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SymmetricKey +func (tr *SymmetricKey) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SymmetricKey +func (tr *SymmetricKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, because we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SymmetricKey using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SymmetricKey) LateInitialize(attrs []byte) (bool, error) { + params := &SymmetricKeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SymmetricKey) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/kms/v1alpha1/zz_symmetrickey_types.go b/apis/kms/v1alpha1/zz_symmetrickey_types.go new file mode 100755 index 0000000..8dd83ef --- /dev/null +++ b/apis/kms/v1alpha1/zz_symmetrickey_types.go @@ -0,0 +1,179 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SymmetricKeyInitParameters struct { + + // Encryption algorithm to be used with a new key version, generated with the next rotation. The default value is AES_128. + DefaultAlgorithm *string `json:"defaultAlgorithm,omitempty" tf:"default_algorithm,omitempty"` + + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the key. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Interval between automatic rotations. To disable automatic rotation, omit this parameter. + RotationPeriod *string `json:"rotationPeriod,omitempty" tf:"rotation_period,omitempty"` +} + +type SymmetricKeyObservation struct { + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Encryption algorithm to be used with a new key version, generated with the next rotation. The default value is AES_128. + DefaultAlgorithm *string `json:"defaultAlgorithm,omitempty" tf:"default_algorithm,omitempty"` + + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. 
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the key. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Last rotation timestamp of the key. + RotatedAt *string `json:"rotatedAt,omitempty" tf:"rotated_at,omitempty"` + + // Interval between automatic rotations. To disable automatic rotation, omit this parameter. + RotationPeriod *string `json:"rotationPeriod,omitempty" tf:"rotation_period,omitempty"` + + // The status of the key. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type SymmetricKeyParameters struct { + + // Encryption algorithm to be used with a new key version, generated with the next rotation. The default value is AES_128. + // +kubebuilder:validation:Optional + DefaultAlgorithm *string `json:"defaultAlgorithm,omitempty" tf:"default_algorithm,omitempty"` + + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of the key. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the key. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the key. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Interval between automatic rotations. To disable automatic rotation, omit this parameter. + // +kubebuilder:validation:Optional + RotationPeriod *string `json:"rotationPeriod,omitempty" tf:"rotation_period,omitempty"` +} + +// SymmetricKeySpec defines the desired state of SymmetricKey +type SymmetricKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SymmetricKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. 
+ InitProvider SymmetricKeyInitParameters `json:"initProvider,omitempty"` +} + +// SymmetricKeyStatus defines the observed state of SymmetricKey. +type SymmetricKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SymmetricKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SymmetricKey is the Schema for the SymmetricKeys API. Creates a Yandex KMS symmetric key that can be used for cryptographic operations. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SymmetricKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SymmetricKeySpec `json:"spec"` + Status SymmetricKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SymmetricKeyList contains a list of SymmetricKeys +type SymmetricKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SymmetricKey `json:"items"` +} + +// Repository type metadata. +var ( + SymmetricKey_Kind = "SymmetricKey" + SymmetricKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SymmetricKey_Kind}.String() + SymmetricKey_KindAPIVersion = SymmetricKey_Kind + "." + CRDGroupVersion.String() + SymmetricKey_GroupVersionKind = CRDGroupVersion.WithKind(SymmetricKey_Kind) +) + +func init() { + SchemeBuilder.Register(&SymmetricKey{}, &SymmetricKeyList{}) +} diff --git a/apis/kms/v1alpha1/zz_symmetrickeyiambinding_terraformed.go b/apis/kms/v1alpha1/zz_symmetrickeyiambinding_terraformed.go new file mode 100755 index 0000000..6a04230 --- /dev/null +++ b/apis/kms/v1alpha1/zz_symmetrickeyiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
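+// Note(docs): a hedged sketch, with an invented key role and service account ID, of the spec this binding carries once references are resolved; it assumes the iam.ServiceAccountRefValue extractor named in the types below yields members in the provider's "serviceAccount:<id>" form: +// +// Role: "kms.keys.encrypterDecrypter" +// Members: ["serviceAccount:exampleserviceaccountid"] +// SymmetricKeyID: filled in from SymmetricKeyIDRef by zz_generated.resolvers.go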
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SymmetricKeyIAMBinding +func (mg *SymmetricKeyIAMBinding) GetTerraformResourceType() string { + return "yandex_kms_symmetric_key_iam_binding" +} + +// GetConnectionDetailsMapping for this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SymmetricKeyIAMBinding +func (tr *SymmetricKeyIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, because we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SymmetricKeyIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SymmetricKeyIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &SymmetricKeyIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SymmetricKeyIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kms/v1alpha1/zz_symmetrickeyiambinding_types.go b/apis/kms/v1alpha1/zz_symmetrickeyiambinding_types.go new file mode 100755 index 0000000..d698be2 --- /dev/null +++ b/apis/kms/v1alpha1/zz_symmetrickeyiambinding_types.go @@ -0,0 +1,164 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SymmetricKeyIAMBindingInitParameters struct { + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // The Yandex Key Management Service Symmetric Key ID to apply a binding to. + // +crossplane:generate:reference:type=SymmetricKey + SymmetricKeyID *string `json:"symmetricKeyId,omitempty" tf:"symmetric_key_id,omitempty"` + + // Reference to a SymmetricKey to populate symmetricKeyId. + // +kubebuilder:validation:Optional + SymmetricKeyIDRef *v1.Reference `json:"symmetricKeyIdRef,omitempty" tf:"-"` + + // Selector for a SymmetricKey to populate symmetricKeyId. + // +kubebuilder:validation:Optional + SymmetricKeyIDSelector *v1.Selector `json:"symmetricKeyIdSelector,omitempty" tf:"-"` +} + +type SymmetricKeyIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. 
+ Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // The Yandex Key Management Service Symmetric Key ID to apply a binding to. + SymmetricKeyID *string `json:"symmetricKeyId,omitempty" tf:"symmetric_key_id,omitempty"` +} + +type SymmetricKeyIAMBindingParameters struct { + + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue() + // +crossplane:generate:reference:refFieldName=ServiceAccountRef + // +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // References to ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // Selector for a list of ServiceAccount in iam to populate members. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // The Yandex Key Management Service Symmetric Key ID to apply a binding to. + // +crossplane:generate:reference:type=SymmetricKey + // +kubebuilder:validation:Optional + SymmetricKeyID *string `json:"symmetricKeyId,omitempty" tf:"symmetric_key_id,omitempty"` + + // Reference to a SymmetricKey to populate symmetricKeyId. + // +kubebuilder:validation:Optional + SymmetricKeyIDRef *v1.Reference `json:"symmetricKeyIdRef,omitempty" tf:"-"` + + // Selector for a SymmetricKey to populate symmetricKeyId. + // +kubebuilder:validation:Optional + SymmetricKeyIDSelector *v1.Selector `json:"symmetricKeyIdSelector,omitempty" tf:"-"` +} + +// SymmetricKeyIAMBindingSpec defines the desired state of SymmetricKeyIAMBinding +type SymmetricKeyIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SymmetricKeyIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider SymmetricKeyIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// SymmetricKeyIAMBindingStatus defines the observed state of SymmetricKeyIAMBinding. 
+type SymmetricKeyIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SymmetricKeyIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SymmetricKeyIAMBinding is the Schema for the SymmetricKeyIAMBindings API. Allows management of a single IAM binding for a Yandex Key Management Service Symmetric Key. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SymmetricKeyIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec SymmetricKeyIAMBindingSpec `json:"spec"` + Status SymmetricKeyIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SymmetricKeyIAMBindingList contains a list of SymmetricKeyIAMBindings +type SymmetricKeyIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SymmetricKeyIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + SymmetricKeyIAMBinding_Kind = "SymmetricKeyIAMBinding" + SymmetricKeyIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SymmetricKeyIAMBinding_Kind}.String() + SymmetricKeyIAMBinding_KindAPIVersion = SymmetricKeyIAMBinding_Kind + "." + CRDGroupVersion.String() + SymmetricKeyIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(SymmetricKeyIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&SymmetricKeyIAMBinding{}, &SymmetricKeyIAMBindingList{}) +} diff --git a/apis/kubernetes/v1alpha1/zz_cluster_terraformed.go b/apis/kubernetes/v1alpha1/zz_cluster_terraformed.go new file mode 100755 index 0000000..05119d6 --- /dev/null +++ b/apis/kubernetes/v1alpha1/zz_cluster_terraformed.go @@ -0,0 +1,126 @@ +// Code generated by upjet. DO NOT EDIT. 
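+// Note(docs): one notable difference from the KMS resources above, shown as a sketch of the options built in LateInitialize below: the extra resource.WithNameFilter entry should exclude that canonical field from late-initialization, so observed state never overwrites it in spec.forProvider: +// +// opts := []resource.GenericLateInitializerOption{ +// resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard), +// resource.WithNameFilter("Master.Regional.Location"), // skipped during late-init +// }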
+
+package v1alpha1
+
+import (
+ "dario.cat/mergo"
+ "github.com/pkg/errors"
+
+ "github.com/crossplane/upjet/pkg/resource"
+ "github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Cluster
+func (mg *Cluster) GetTerraformResourceType() string {
+ return "yandex_kubernetes_cluster"
+}
+
+// GetConnectionDetailsMapping for this Cluster
+func (tr *Cluster) GetConnectionDetailsMapping() map[string]string {
+ return nil
+}
+
+// GetObservation of this Cluster
+func (tr *Cluster) GetObservation() (map[string]any, error) {
+ o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Cluster
+func (tr *Cluster) SetObservation(obs map[string]any) error {
+ p, err := json.TFParser.Marshal(obs)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Cluster
+func (tr *Cluster) GetID() string {
+ if tr.Status.AtProvider.ID == nil {
+ return ""
+ }
+ return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Cluster
+func (tr *Cluster) GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Cluster
+func (tr *Cluster) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Cluster
+func (tr *Cluster) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Cluster
+func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
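Editor's note: to make the merge semantics above concrete, a standalone sketch (not part of the generated file) showing that initProvider values only fill gaps in forProvider and never overwrite values forProvider already sets. The map keys are illustrative.

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	forProvider := map[string]any{"name": "prod-cluster"}
	initProvider := map[string]any{"name": "ignored", "releaseChannel": "STABLE"}

	// WithSliceDeepCopy also flips overwrite on, so it is switched back off,
	// exactly as in GetMergedParameters above.
	_ = mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})

	// "name" keeps its forProvider value; "releaseChannel" is filled in.
	fmt.Println(forProvider) // map[name:prod-cluster releaseChannel:STABLE]
}

+
+// LateInitialize this Cluster using its observed tfState.
+// returns True if there are any spec changes for the resource.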
+func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) {
+ params := &ClusterParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+ opts = append(opts, resource.WithNameFilter("Master.Regional.Location"))
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Cluster) GetTerraformSchemaVersion() int {
+ return 0
+}
diff --git a/apis/kubernetes/v1alpha1/zz_cluster_types.go b/apis/kubernetes/v1alpha1/zz_cluster_types.go
new file mode 100755
index 0000000..5626801
--- /dev/null
+++ b/apis/kubernetes/v1alpha1/zz_cluster_types.go
@@ -0,0 +1,936 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type CiliumInitParameters struct {
+}
+
+type CiliumObservation struct {
+}
+
+type CiliumParameters struct {
+}
+
+type ClusterInitParameters struct {
+
+ // CIDR block. IP range for allocating pod addresses. It should not overlap with any subnet in the network the Kubernetes cluster is located in. Static routes will be set up for these CIDR blocks in node subnets.
+ ClusterIPv4Range *string `json:"clusterIpv4Range,omitempty" tf:"cluster_ipv4_range,omitempty"`
+
+ // Identical to cluster_ipv4_range but for IPv6 protocol.
+ ClusterIPv6Range *string `json:"clusterIpv6Range,omitempty" tf:"cluster_ipv6_range,omitempty"`
+
+ // A description of the Kubernetes cluster.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The ID of the folder that the Kubernetes cluster belongs to. If it is not provided, the default provider folder is used.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+ // Cluster KMS provider parameters.
+ KMSProvider []KMSProviderInitParameters `json:"kmsProvider,omitempty" tf:"kms_provider,omitempty"`
+
+ // A set of key/value label pairs to assign to the Kubernetes cluster.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // Kubernetes master configuration options. The structure is documented below.
+ Master []MasterInitParameters `json:"master,omitempty" tf:"master,omitempty"`
+
+ // Name of a specific Kubernetes cluster.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // The ID of the cluster network.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+ // Reference to a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+ // Selector for a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+ // Network Implementation options. The structure is documented below.
+ NetworkImplementation []NetworkImplementationInitParameters `json:"networkImplementation,omitempty" tf:"network_implementation,omitempty"`
+
+ // Network policy provider for the cluster. Possible values: CALICO.
+ NetworkPolicyProvider *string `json:"networkPolicyProvider,omitempty" tf:"network_policy_provider,omitempty"`
+
+ // Size of the masks that are assigned to each node in the cluster. Effectively limits maximum number of pods for each node.
+ NodeIPv4CidrMaskSize *float64 `json:"nodeIpv4CidrMaskSize,omitempty" tf:"node_ipv4_cidr_mask_size,omitempty"`
+
+ // Service account to be used by the worker nodes of the Kubernetes cluster to access Container Registry or to push node logs and metrics.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ NodeServiceAccountID *string `json:"nodeServiceAccountId,omitempty" tf:"node_service_account_id,omitempty"`
+
+ // Reference to a SecurityGroup in vpc to populate nodeServiceAccountId.
+ // +kubebuilder:validation:Optional
+ NodeServiceAccountIDRef *v1.Reference `json:"nodeServiceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a SecurityGroup in vpc to populate nodeServiceAccountId.
+ // +kubebuilder:validation:Optional
+ NodeServiceAccountIDSelector *v1.Selector `json:"nodeServiceAccountIdSelector,omitempty" tf:"-"`
+
+ // Cluster release channel.
+ ReleaseChannel *string `json:"releaseChannel,omitempty" tf:"release_channel,omitempty"`
+
+ // Service account to be used for provisioning Compute Cloud and VPC resources for Kubernetes cluster. Selected service account should have edit role on the folder where the Kubernetes cluster will be located and on the folder where selected network resides.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // Reference to a SecurityGroup in vpc to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a SecurityGroup in vpc to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+
+ // CIDR block. IP range from which Kubernetes service cluster IP addresses will be allocated. It should not overlap with any subnet in the network the Kubernetes cluster is located in.
+ ServiceIPv4Range *string `json:"serviceIpv4Range,omitempty" tf:"service_ipv4_range,omitempty"`
+
+ // Identical to service_ipv4_range but for IPv6 protocol.
+ ServiceIPv6Range *string `json:"serviceIpv6Range,omitempty" tf:"service_ipv6_range,omitempty"`
+}
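Editor's note: the Ref/Selector field pairs above follow the usual Crossplane resolution pattern, and a brief sketch may help. Names and labels are assumptions; the snippet is written as if it sat alongside the generated package (reusing its v1 import).

// In package v1alpha1, for illustration only:
func exampleReferences() ClusterInitParameters {
	return ClusterInitParameters{
		// Option 1: reference a specific managed Folder object by name.
		FolderIDRef: &v1.Reference{Name: "example-folder"}, // hypothetical name
		// Option 2: let the provider resolve networkId from any managed
		// Network whose labels match this selector at reconcile time.
		NetworkIDSelector: &v1.Selector{
			MatchLabels: map[string]string{"purpose": "k8s"}, // hypothetical label
		},
	}
}

+
+type ClusterObservation struct {
+
+ // CIDR block. IP range for allocating pod addresses. It should not overlap with any subnet in the network the Kubernetes cluster is located in. Static routes will be set up for these CIDR blocks in node subnets.
+ ClusterIPv4Range *string `json:"clusterIpv4Range,omitempty" tf:"cluster_ipv4_range,omitempty"`
+
+ // Identical to cluster_ipv4_range but for IPv6 protocol.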
+ ClusterIPv6Range *string `json:"clusterIpv6Range,omitempty" tf:"cluster_ipv6_range,omitempty"`
+
+ // (Computed) The Kubernetes cluster creation timestamp.
+ CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+ // A description of the Kubernetes cluster.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The ID of the folder that the Kubernetes cluster belongs to. If it is not provided, the default provider folder is used.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // (Computed) Health of the Kubernetes cluster.
+ Health *string `json:"health,omitempty" tf:"health,omitempty"`
+
+ // (Computed) ID of a new Kubernetes cluster.
+ ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+ // Cluster KMS provider parameters.
+ KMSProvider []KMSProviderObservation `json:"kmsProvider,omitempty" tf:"kms_provider,omitempty"`
+
+ // A set of key/value label pairs to assign to the Kubernetes cluster.
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // Log group where the cluster stores its system logs, such as audit, events, or control plane logs.
+ LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+
+ // Kubernetes master configuration options. The structure is documented below.
+ Master []MasterObservation `json:"master,omitempty" tf:"master,omitempty"`
+
+ // Name of a specific Kubernetes cluster.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // The ID of the cluster network.
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+ // Network Implementation options. The structure is documented below.
+ NetworkImplementation []NetworkImplementationObservation `json:"networkImplementation,omitempty" tf:"network_implementation,omitempty"`
+
+ // Network policy provider for the cluster. Possible values: CALICO.
+ NetworkPolicyProvider *string `json:"networkPolicyProvider,omitempty" tf:"network_policy_provider,omitempty"`
+
+ // Size of the masks that are assigned to each node in the cluster. Effectively limits maximum number of pods for each node.
+ NodeIPv4CidrMaskSize *float64 `json:"nodeIpv4CidrMaskSize,omitempty" tf:"node_ipv4_cidr_mask_size,omitempty"`
+
+ // Service account to be used by the worker nodes of the Kubernetes cluster to access Container Registry or to push node logs and metrics.
+ NodeServiceAccountID *string `json:"nodeServiceAccountId,omitempty" tf:"node_service_account_id,omitempty"`
+
+ // Cluster release channel.
+ ReleaseChannel *string `json:"releaseChannel,omitempty" tf:"release_channel,omitempty"`
+
+ // Service account to be used for provisioning Compute Cloud and VPC resources for Kubernetes cluster. Selected service account should have edit role on the folder where the Kubernetes cluster will be located and on the folder where selected network resides.
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // CIDR block. IP range from which Kubernetes service cluster IP addresses will be allocated. It should not overlap with any subnet in the network the Kubernetes cluster is located in.
+ ServiceIPv4Range *string `json:"serviceIpv4Range,omitempty" tf:"service_ipv4_range,omitempty"`
+
+ // Identical to service_ipv4_range but for IPv6 protocol.
+ ServiceIPv6Range *string `json:"serviceIpv6Range,omitempty" tf:"service_ipv6_range,omitempty"`
+
+ // (Computed) Status of the Kubernetes cluster.
+ Status *string `json:"status,omitempty" tf:"status,omitempty"`
+}
+
+type ClusterParameters struct {
+
+ // CIDR block. IP range for allocating pod addresses. It should not overlap with any subnet in the network the Kubernetes cluster is located in. Static routes will be set up for these CIDR blocks in node subnets.
+ // +kubebuilder:validation:Optional
+ ClusterIPv4Range *string `json:"clusterIpv4Range,omitempty" tf:"cluster_ipv4_range,omitempty"`
+
+ // Identical to cluster_ipv4_range but for IPv6 protocol.
+ // +kubebuilder:validation:Optional
+ ClusterIPv6Range *string `json:"clusterIpv6Range,omitempty" tf:"cluster_ipv6_range,omitempty"`
+
+ // A description of the Kubernetes cluster.
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // The ID of the folder that the Kubernetes cluster belongs to. If it is not provided, the default provider folder is used.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+ // Cluster KMS provider parameters.
+ // +kubebuilder:validation:Optional
+ KMSProvider []KMSProviderParameters `json:"kmsProvider,omitempty" tf:"kms_provider,omitempty"`
+
+ // A set of key/value label pairs to assign to the Kubernetes cluster.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+ // Kubernetes master configuration options. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Master []MasterParameters `json:"master,omitempty" tf:"master,omitempty"`
+
+ // Name of a specific Kubernetes cluster.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+ // The ID of the cluster network.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+ // +kubebuilder:validation:Optional
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+ // Reference to a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+ // Selector for a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+ // Network Implementation options. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ NetworkImplementation []NetworkImplementationParameters `json:"networkImplementation,omitempty" tf:"network_implementation,omitempty"`
+
+ // Network policy provider for the cluster. Possible values: CALICO.
+ // +kubebuilder:validation:Optional
+ NetworkPolicyProvider *string `json:"networkPolicyProvider,omitempty" tf:"network_policy_provider,omitempty"`
+
+ // Size of the masks that are assigned to each node in the cluster. Effectively limits maximum number of pods for each node.
+ // +kubebuilder:validation:Optional
+ NodeIPv4CidrMaskSize *float64 `json:"nodeIpv4CidrMaskSize,omitempty" tf:"node_ipv4_cidr_mask_size,omitempty"`
+
+ // Service account to be used by the worker nodes of the Kubernetes cluster to access Container Registry or to push node logs and metrics.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +kubebuilder:validation:Optional
+ NodeServiceAccountID *string `json:"nodeServiceAccountId,omitempty" tf:"node_service_account_id,omitempty"`
+
+ // Reference to a SecurityGroup in vpc to populate nodeServiceAccountId.
+ // +kubebuilder:validation:Optional
+ NodeServiceAccountIDRef *v1.Reference `json:"nodeServiceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a SecurityGroup in vpc to populate nodeServiceAccountId.
+ // +kubebuilder:validation:Optional
+ NodeServiceAccountIDSelector *v1.Selector `json:"nodeServiceAccountIdSelector,omitempty" tf:"-"`
+
+ // Cluster release channel.
+ // +kubebuilder:validation:Optional
+ ReleaseChannel *string `json:"releaseChannel,omitempty" tf:"release_channel,omitempty"`
+
+ // Service account to be used for provisioning Compute Cloud and VPC resources for Kubernetes cluster. Selected service account should have edit role on the folder where the Kubernetes cluster will be located and on the folder where selected network resides.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +kubebuilder:validation:Optional
+ ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"`
+
+ // Reference to a SecurityGroup in vpc to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"`
+
+ // Selector for a SecurityGroup in vpc to populate serviceAccountId.
+ // +kubebuilder:validation:Optional
+ ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"`
+
+ // CIDR block. IP range from which Kubernetes service cluster IP addresses will be allocated. It should not overlap with any subnet in the network the Kubernetes cluster is located in.
+ // +kubebuilder:validation:Optional
+ ServiceIPv4Range *string `json:"serviceIpv4Range,omitempty" tf:"service_ipv4_range,omitempty"`
+
+ // Identical to service_ipv4_range but for IPv6 protocol.
+ // +kubebuilder:validation:Optional
+ ServiceIPv6Range *string `json:"serviceIpv6Range,omitempty" tf:"service_ipv6_range,omitempty"`
+}
+
+type KMSProviderInitParameters struct {
+
+ // KMS key ID.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey
+ KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"`
+
+ // Reference to a SymmetricKey in kms to populate keyId.
+ // +kubebuilder:validation:Optional
+ KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"`
+
+ // Selector for a SymmetricKey in kms to populate keyId.
+ // +kubebuilder:validation:Optional
+ KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"`
+}
+
+type KMSProviderObservation struct {
+
+ // KMS key ID.
+ KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"`
+}
+
+type KMSProviderParameters struct {
+
+ // KMS key ID.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey
+ // +kubebuilder:validation:Optional
+ KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"`
+
+ // Reference to a SymmetricKey in kms to populate keyId.
+ // +kubebuilder:validation:Optional
+ KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"`
+
+ // Selector for a SymmetricKey in kms to populate keyId.
+ // +kubebuilder:validation:Optional
+ KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"`
+}
+
+type LocationInitParameters struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+ // ID of the availability zone.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("zone",false)
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+
+ // Reference to a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneRef *v1.Reference `json:"zoneRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneSelector *v1.Selector `json:"zoneSelector,omitempty" tf:"-"`
+}
+
+type LocationObservation struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // ID of the availability zone.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type LocationParameters struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +kubebuilder:validation:Optional
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+ // ID of the availability zone.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("zone",false)
+ // +kubebuilder:validation:Optional
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+
+ // Reference to a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneRef *v1.Reference `json:"zoneRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneSelector *v1.Selector `json:"zoneSelector,omitempty" tf:"-"`
+}
+
+type MaintenancePolicyInitParameters struct {
+
+ // Boolean flag that specifies if the master can be upgraded automatically. When omitted, the default value is TRUE.
+ AutoUpgrade *bool `json:"autoUpgrade,omitempty" tf:"auto_upgrade,omitempty"`
+
+ // (Computed) This structure specifies the maintenance window when updates for the master are allowed. When omitted, it defaults to any time. To specify a time-of-day interval for all days, provide one element with the two fields start_time and duration set. Please see the zonal_cluster_resource_name config example.
+ MaintenanceWindow []MaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+}
+
+type MaintenancePolicyObservation struct {
+
+ // Boolean flag that specifies if the master can be upgraded automatically. When omitted, the default value is TRUE.
+ AutoUpgrade *bool `json:"autoUpgrade,omitempty" tf:"auto_upgrade,omitempty"`
+
+ // (Computed) This structure specifies the maintenance window when updates for the master are allowed. When omitted, it defaults to any time. To specify a time-of-day interval for all days, provide one element with the two fields start_time and duration set. Please see the zonal_cluster_resource_name config example.
+ MaintenanceWindow []MaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+}
+
+type MaintenancePolicyParameters struct {
+
+ // Boolean flag that specifies if the master can be upgraded automatically. When omitted, the default value is TRUE.
+ // +kubebuilder:validation:Optional
+ AutoUpgrade *bool `json:"autoUpgrade" tf:"auto_upgrade,omitempty"`
+
+ // (Computed) This structure specifies the maintenance window when updates for the master are allowed. When omitted, it defaults to any time. To specify a time-of-day interval for all days, provide one element with the two fields start_time and duration set. Please see the zonal_cluster_resource_name config example.
+ // +kubebuilder:validation:Optional
+ MaintenanceWindow []MaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+}
+
+type MaintenanceWindowInitParameters struct {
+ Day *string `json:"day,omitempty" tf:"day,omitempty"`
+
+ Duration *string `json:"duration,omitempty" tf:"duration,omitempty"`
+
+ StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"`
+}
+
+type MaintenanceWindowObservation struct {
+ Day *string `json:"day,omitempty" tf:"day,omitempty"`
+
+ Duration *string `json:"duration,omitempty" tf:"duration,omitempty"`
+
+ StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"`
+}
+
+type MaintenanceWindowParameters struct {
+
+ // +kubebuilder:validation:Optional
+ Day *string `json:"day,omitempty" tf:"day,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ Duration *string `json:"duration" tf:"duration,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ StartTime *string `json:"startTime" tf:"start_time,omitempty"`
+}
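Editor's note: a standalone sketch of the "one element, all days" maintenance window described above. The time and duration values are assumptions; the snippet is written as if it sat alongside the generated package.

// In package v1alpha1, for illustration only:
func exampleMaintenancePolicy() MaintenancePolicyParameters {
	autoUpgrade := true
	start := "22:00" // hypothetical start time of day
	duration := "3h" // hypothetical window length
	return MaintenancePolicyParameters{
		AutoUpgrade: &autoUpgrade,
		MaintenanceWindow: []MaintenanceWindowParameters{
			// Day omitted: the window applies to every day of the week.
			{StartTime: &start, Duration: &duration},
		},
	}
}

+
+type MasterInitParameters struct {
+ EtcdClusterSize *float64 `json:"etcdClusterSize,omitempty" tf:"etcd_cluster_size,omitempty"`
+
+ ExternalV6Address *string `json:"externalV6Address,omitempty" tf:"external_v6_address,omitempty"`
+
+ // (Computed) Maintenance policy for Kubernetes master. If policy is omitted, automatic revision upgrades of the Kubernetes master are enabled and could happen at any time. Revision upgrades are performed only within the same minor version, e.g. 1.13. Minor version upgrades (e.g. 1.13->1.14) should be performed manually. The structure is documented below.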
+ MaintenancePolicy []MaintenancePolicyInitParameters `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+ // Array of the cluster master's instance locations (zone and subnet). Cannot be used together with zonal or regional. Currently supports either one instance, for a zonal master, or three instances of master_location. Can be updated in place. When creating a regional cluster (three master instances), its region will be evaluated automatically by the backend. The structure is documented below.
+ MasterLocation []MasterLocationInitParameters `json:"masterLocation,omitempty" tf:"master_location,omitempty"`
+
+ // Master Logging options. The structure is documented below.
+ MasterLogging []MasterLoggingInitParameters `json:"masterLogging,omitempty" tf:"master_logging,omitempty"`
+
+ // (Computed) Boolean flag. When true, the Kubernetes master will have a visible IPv4 address.
+ PublicIP *bool `json:"publicIp,omitempty" tf:"public_ip,omitempty"`
+
+ // Initialize parameters for Regional Master (highly available master). The structure is documented below.
+ Regional []RegionalInitParameters `json:"regional,omitempty" tf:"regional,omitempty"`
+
+ // List of security group IDs to which the Kubernetes cluster belongs.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+ // (Computed) Version of Kubernetes that will be used for master.
+ Version *string `json:"version,omitempty" tf:"version,omitempty"`
+
+ // Initialize parameters for Zonal Master (single node master). The structure is documented below.
+ Zonal []ZonalInitParameters `json:"zonal,omitempty" tf:"zonal,omitempty"`
+}
+
+type MasterLocationInitParameters struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+ // ID of the availability zone.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type MasterLocationObservation struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // ID of the availability zone.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type MasterLocationParameters struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +kubebuilder:validation:Optional
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+ // ID of the availability zone.
+ // +kubebuilder:validation:Optional
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type MasterLoggingInitParameters struct {
+
+ // Boolean flag that specifies if kube-apiserver audit logs should be sent to Yandex Cloud Logging.
+ AuditEnabled *bool `json:"auditEnabled,omitempty" tf:"audit_enabled,omitempty"`
+
+ // Boolean flag that specifies if cluster-autoscaler logs should be sent to Yandex Cloud Logging.
+ ClusterAutoscalerEnabled *bool `json:"clusterAutoscalerEnabled,omitempty" tf:"cluster_autoscaler_enabled,omitempty"`
+
+ // Boolean flag that specifies if master component logs should be sent to Yandex Cloud Logging. The exact components that will send their logs must be configured via the options described below.
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+ // Boolean flag that specifies if Kubernetes cluster events should be sent to Yandex Cloud Logging.
+ EventsEnabled *bool `json:"eventsEnabled,omitempty" tf:"events_enabled,omitempty"`
+
+ // ID of the folder whose default Log group should be used to collect logs.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Boolean flag that specifies if kube-apiserver logs should be sent to Yandex Cloud Logging.
+ KubeApiserverEnabled *bool `json:"kubeApiserverEnabled,omitempty" tf:"kube_apiserver_enabled,omitempty"`
+
+ // ID of the Yandex Cloud Logging Log group.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/logging/v1alpha1.Group
+ // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+ LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+
+ // Reference to a Group in logging to populate logGroupId.
+ // +kubebuilder:validation:Optional
+ LogGroupIDRef *v1.Reference `json:"logGroupIdRef,omitempty" tf:"-"`
+
+ // Selector for a Group in logging to populate logGroupId.
+ // +kubebuilder:validation:Optional
+ LogGroupIDSelector *v1.Selector `json:"logGroupIdSelector,omitempty" tf:"-"`
+}
+
+type MasterLoggingObservation struct {
+
+ // Boolean flag that specifies if kube-apiserver audit logs should be sent to Yandex Cloud Logging.
+ AuditEnabled *bool `json:"auditEnabled,omitempty" tf:"audit_enabled,omitempty"`
+
+ // Boolean flag that specifies if cluster-autoscaler logs should be sent to Yandex Cloud Logging.
+ ClusterAutoscalerEnabled *bool `json:"clusterAutoscalerEnabled,omitempty" tf:"cluster_autoscaler_enabled,omitempty"`
+
+ // Boolean flag that specifies if master component logs should be sent to Yandex Cloud Logging. The exact components that will send their logs must be configured via the options described below.
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+ // Boolean flag that specifies if Kubernetes cluster events should be sent to Yandex Cloud Logging.
+ EventsEnabled *bool `json:"eventsEnabled,omitempty" tf:"events_enabled,omitempty"`
+
+ // ID of the folder whose default Log group should be used to collect logs.
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Boolean flag that specifies if kube-apiserver logs should be sent to Yandex Cloud Logging.
+ KubeApiserverEnabled *bool `json:"kubeApiserverEnabled,omitempty" tf:"kube_apiserver_enabled,omitempty"`
+
+ // ID of the Yandex Cloud Logging Log group.
+ LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+}
+
+type MasterLoggingParameters struct {
+
+ // Boolean flag that specifies if kube-apiserver audit logs should be sent to Yandex Cloud Logging.
+ // +kubebuilder:validation:Optional
+ AuditEnabled *bool `json:"auditEnabled,omitempty" tf:"audit_enabled,omitempty"`
+
+ // Boolean flag that specifies if cluster-autoscaler logs should be sent to Yandex Cloud Logging.
+ // +kubebuilder:validation:Optional
+ ClusterAutoscalerEnabled *bool `json:"clusterAutoscalerEnabled,omitempty" tf:"cluster_autoscaler_enabled,omitempty"`
+
+ // Boolean flag that specifies if master component logs should be sent to Yandex Cloud Logging. The exact components that will send their logs must be configured via the options described below.
+ // +kubebuilder:validation:Optional
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+ // Boolean flag that specifies if Kubernetes cluster events should be sent to Yandex Cloud Logging.
+ // +kubebuilder:validation:Optional
+ EventsEnabled *bool `json:"eventsEnabled,omitempty" tf:"events_enabled,omitempty"`
+
+ // ID of the folder whose default Log group should be used to collect logs.
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+ // Boolean flag that specifies if kube-apiserver logs should be sent to Yandex Cloud Logging.
+ // +kubebuilder:validation:Optional
+ KubeApiserverEnabled *bool `json:"kubeApiserverEnabled,omitempty" tf:"kube_apiserver_enabled,omitempty"`
+
+ // ID of the Yandex Cloud Logging Log group.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/logging/v1alpha1.Group
+ // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+ // +kubebuilder:validation:Optional
+ LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`
+
+ // Reference to a Group in logging to populate logGroupId.
+ // +kubebuilder:validation:Optional
+ LogGroupIDRef *v1.Reference `json:"logGroupIdRef,omitempty" tf:"-"`
+
+ // Selector for a Group in logging to populate logGroupId.
+ // +kubebuilder:validation:Optional
+ LogGroupIDSelector *v1.Selector `json:"logGroupIdSelector,omitempty" tf:"-"`
+}
+
+type MasterObservation struct {
+
+ // (Computed) PEM-encoded public certificate that is the root of trust for the Kubernetes cluster.
+ ClusterCACertificate *string `json:"clusterCaCertificate,omitempty" tf:"cluster_ca_certificate,omitempty"`
+
+ EtcdClusterSize *float64 `json:"etcdClusterSize,omitempty" tf:"etcd_cluster_size,omitempty"`
+
+ // (Computed) An IPv4 external network address that is assigned to the master.
+ ExternalV4Address *string `json:"externalV4Address,omitempty" tf:"external_v4_address,omitempty"`
+
+ // (Computed) External endpoint that can be used to access Kubernetes cluster API from the internet (outside of the cloud).
+ ExternalV4Endpoint *string `json:"externalV4Endpoint,omitempty" tf:"external_v4_endpoint,omitempty"`
+
+ ExternalV6Address *string `json:"externalV6Address,omitempty" tf:"external_v6_address,omitempty"`
+
+ ExternalV6Endpoint *string `json:"externalV6Endpoint,omitempty" tf:"external_v6_endpoint,omitempty"`
+
+ // (Computed) An IPv4 internal network address that is assigned to the master.
+ InternalV4Address *string `json:"internalV4Address,omitempty" tf:"internal_v4_address,omitempty"`
+
+ // (Computed) Internal endpoint that can be used to connect to the master from cloud networks.
+ InternalV4Endpoint *string `json:"internalV4Endpoint,omitempty" tf:"internal_v4_endpoint,omitempty"`
+
+ // (Computed) Maintenance policy for Kubernetes master. If policy is omitted, automatic revision upgrades of the Kubernetes master are enabled and could happen at any time. Revision upgrades are performed only within the same minor version, e.g. 1.13. Minor version upgrades (e.g. 1.13->1.14) should be performed manually. The structure is documented below.
+ MaintenancePolicy []MaintenancePolicyObservation `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+ // Array of the cluster master's instance locations (zone and subnet). Cannot be used together with zonal or regional. Currently supports either one instance, for a zonal master, or three instances of master_location. Can be updated in place. When creating a regional cluster (three master instances), its region will be evaluated automatically by the backend. The structure is documented below.
+ MasterLocation []MasterLocationObservation `json:"masterLocation,omitempty" tf:"master_location,omitempty"`
+
+ // Master Logging options. The structure is documented below.
+ MasterLogging []MasterLoggingObservation `json:"masterLogging,omitempty" tf:"master_logging,omitempty"`
+
+ // (Computed) Boolean flag. When true, the Kubernetes master will have a visible IPv4 address.
+ PublicIP *bool `json:"publicIp,omitempty" tf:"public_ip,omitempty"`
+
+ // Initialize parameters for Regional Master (highly available master). The structure is documented below.
+ Regional []RegionalObservation `json:"regional,omitempty" tf:"regional,omitempty"`
+
+ // List of security group IDs to which the Kubernetes cluster belongs.
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // (Computed) Version of Kubernetes that will be used for master.
+ Version *string `json:"version,omitempty" tf:"version,omitempty"`
+
+ // (Computed) Information about cluster version. The structure is documented below.
+ VersionInfo []VersionInfoObservation `json:"versionInfo,omitempty" tf:"version_info,omitempty"`
+
+ // Initialize parameters for Zonal Master (single node master). The structure is documented below.
+ Zonal []ZonalObservation `json:"zonal,omitempty" tf:"zonal,omitempty"`
+}
+
+type MasterParameters struct {
+
+ // +kubebuilder:validation:Optional
+ EtcdClusterSize *float64 `json:"etcdClusterSize,omitempty" tf:"etcd_cluster_size,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ ExternalV6Address *string `json:"externalV6Address,omitempty" tf:"external_v6_address,omitempty"`
+
+ // (Computed) Maintenance policy for Kubernetes master. If policy is omitted, automatic revision upgrades of the Kubernetes master are enabled and could happen at any time. Revision upgrades are performed only within the same minor version, e.g. 1.13. Minor version upgrades (e.g. 1.13->1.14) should be performed manually. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ MaintenancePolicy []MaintenancePolicyParameters `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+ // Array of the cluster master's instance locations (zone and subnet). Cannot be used together with zonal or regional. Currently supports either one instance, for a zonal master, or three instances of master_location. Can be updated in place. When creating a regional cluster (three master instances), its region will be evaluated automatically by the backend. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ MasterLocation []MasterLocationParameters `json:"masterLocation,omitempty" tf:"master_location,omitempty"`
+
+ // Master Logging options. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ MasterLogging []MasterLoggingParameters `json:"masterLogging,omitempty" tf:"master_logging,omitempty"`
+
+ // (Computed) Boolean flag. When true, the Kubernetes master will have a visible IPv4 address.
+ // +kubebuilder:validation:Optional
+ PublicIP *bool `json:"publicIp,omitempty" tf:"public_ip,omitempty"`
+
+ // Initialize parameters for Regional Master (highly available master). The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Regional []RegionalParameters `json:"regional,omitempty" tf:"regional,omitempty"`
+
+ // List of security group IDs to which the Kubernetes cluster belongs.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+ // (Computed) Version of Kubernetes that will be used for master.
+ // +kubebuilder:validation:Optional
+ Version *string `json:"version,omitempty" tf:"version,omitempty"`
+
+ // Initialize parameters for Zonal Master (single node master). The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Zonal []ZonalParameters `json:"zonal,omitempty" tf:"zonal,omitempty"`
+}
+
+type NetworkImplementationInitParameters struct {
+
+ // Cilium network implementation configuration. No options exist.
+ Cilium []CiliumInitParameters `json:"cilium,omitempty" tf:"cilium,omitempty"`
+}
+
+type NetworkImplementationObservation struct {
+
+ // Cilium network implementation configuration. No options exist.
+ Cilium []CiliumParameters `json:"cilium,omitempty" tf:"cilium,omitempty"`
+}
+
+type NetworkImplementationParameters struct {
+
+ // Cilium network implementation configuration. No options exist.
+ // +kubebuilder:validation:Optional
+ Cilium []CiliumParameters `json:"cilium,omitempty" tf:"cilium,omitempty"`
+}
+
+type RegionalInitParameters struct {
+
+ // Array of locations where master instances will be allocated. The structure is documented below.
+ Location []LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"`
+
+ // Name of the availability region (e.g. "ru-central1") where master instances will be allocated.
+ Region *string `json:"region,omitempty" tf:"region,omitempty"`
+}
+
+type RegionalObservation struct {
+
+ // Array of locations where master instances will be allocated. The structure is documented below.
+ Location []LocationObservation `json:"location,omitempty" tf:"location,omitempty"`
+
+ // Name of the availability region (e.g. "ru-central1") where master instances will be allocated.
+ Region *string `json:"region,omitempty" tf:"region,omitempty"`
+}
+
+type RegionalParameters struct {
+
+ // Array of locations where master instances will be allocated. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Location []LocationParameters `json:"location,omitempty" tf:"location,omitempty"`
+
+ // Name of the availability region (e.g. "ru-central1") where master instances will be allocated.
+ // +kubebuilder:validation:Optional
+ Region *string `json:"region" tf:"region,omitempty"`
+}
+
+type VersionInfoInitParameters struct {
+}
+
+type VersionInfoObservation struct {
+
+ // Current Kubernetes version, major.minor (e.g. 1.15).
+ CurrentVersion *string `json:"currentVersion,omitempty" tf:"current_version,omitempty"`
+
+ // Boolean flag. Newer revisions may include Kubernetes patches (e.g. 1.15.1 -> 1.15.2) as well as some internal component updates - new features or bug fixes in yandex-specific components either on the master or nodes.
+ NewRevisionAvailable *bool `json:"newRevisionAvailable,omitempty" tf:"new_revision_available,omitempty"`
+
+ // Human readable description of the changes to be applied when updating to the latest revision. Empty if new_revision_available is false.
+ NewRevisionSummary *string `json:"newRevisionSummary,omitempty" tf:"new_revision_summary,omitempty"`
+
+ // Boolean flag. The current version is on the deprecation schedule, component (master or node group) should be upgraded.
+ VersionDeprecated *bool `json:"versionDeprecated,omitempty" tf:"version_deprecated,omitempty"`
+}
+
+type VersionInfoParameters struct {
+}
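Editor's note: the Zonal* types below, together with Regional and MasterLocation above, are the three mutually exclusive ways to place master instances. A standalone sketch of the single-node zonal variant, with assumed names, written as if it sat alongside the generated package (reusing its v1 import).

// In package v1alpha1, for illustration only:
func exampleZonalMaster() MasterParameters {
	publicIP := true
	return MasterParameters{
		PublicIP: &publicIP,
		Zonal: []ZonalParameters{
			{
				// Resolve subnetId from a managed Subnet by name; the zone
				// can then be extracted from that Subnet via the
				// ExtractParamPath("zone",...) extractor declared below.
				SubnetIDRef: &v1.Reference{Name: "example-subnet"}, // hypothetical name
			},
		},
	}
}

+
+type ZonalInitParameters struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+ // ID of the availability zone.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("zone",false)
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+
+ // Reference to a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneRef *v1.Reference `json:"zoneRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate zone.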
+ // +kubebuilder:validation:Optional
+ ZoneSelector *v1.Selector `json:"zoneSelector,omitempty" tf:"-"`
+}
+
+type ZonalObservation struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // ID of the availability zone.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type ZonalParameters struct {
+
+ // ID of the subnet. If no ID is specified, and there is only one subnet in the specified zone, an address in this subnet will be allocated.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +kubebuilder:validation:Optional
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional
+ SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+ // ID of the availability zone.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+ // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("zone",false)
+ // +kubebuilder:validation:Optional
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+
+ // Reference to a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneRef *v1.Reference `json:"zoneRef,omitempty" tf:"-"`
+
+ // Selector for a Subnet in vpc to populate zone.
+ // +kubebuilder:validation:Optional
+ ZoneSelector *v1.Selector `json:"zoneSelector,omitempty" tf:"-"`
+}
+
+// ClusterSpec defines the desired state of Cluster
+type ClusterSpec struct {
+ v1.ResourceSpec `json:",inline"`
+ ForProvider ClusterParameters `json:"forProvider"`
+ // THIS IS A BETA FIELD. It will be honored
+ // unless the Management Policies feature flag is disabled.
+ // InitProvider holds the same fields as ForProvider, with the exception
+ // of Identifier and other resource reference fields. The fields that are
+ // in InitProvider are merged into ForProvider when the resource is created.
+ // The same fields are also added to the terraform ignore_changes hook, to
+ // avoid updating them after creation. This is useful for fields that are
+ // required on creation, but we do not desire to update them after creation,
+ // for example because an external controller is managing them, like an
+ // autoscaler.
+ InitProvider ClusterInitParameters `json:"initProvider,omitempty"`
+}
+
+// ClusterStatus defines the observed state of Cluster.
+type ClusterStatus struct {
+ v1.ResourceStatus `json:",inline"`
+ AtProvider ClusterObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Cluster is the Schema for the Clusters API. Allows management of Yandex Kubernetes Cluster.
For more information, see +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.master) || (has(self.initProvider) && has(self.initProvider.master))",message="spec.forProvider.master is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/kubernetes/v1alpha1/zz_generated.conversion_hubs.go b/apis/kubernetes/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..e30771a --- /dev/null +++ b/apis/kubernetes/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,9 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *NodeGroup) Hub() {} diff --git a/apis/kubernetes/v1alpha1/zz_generated.deepcopy.go b/apis/kubernetes/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..540ca8e --- /dev/null +++ b/apis/kubernetes/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,4418 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyInitParameters) DeepCopyInto(out *AllocationPolicyInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]AllocationPolicyLocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyInitParameters. +func (in *AllocationPolicyInitParameters) DeepCopy() *AllocationPolicyInitParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllocationPolicyLocationInitParameters) DeepCopyInto(out *AllocationPolicyLocationInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyLocationInitParameters. +func (in *AllocationPolicyLocationInitParameters) DeepCopy() *AllocationPolicyLocationInitParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyLocationObservation) DeepCopyInto(out *AllocationPolicyLocationObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyLocationObservation. +func (in *AllocationPolicyLocationObservation) DeepCopy() *AllocationPolicyLocationObservation { + if in == nil { + return nil + } + out := new(AllocationPolicyLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyLocationParameters) DeepCopyInto(out *AllocationPolicyLocationParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyLocationParameters. +func (in *AllocationPolicyLocationParameters) DeepCopy() *AllocationPolicyLocationParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyObservation) DeepCopyInto(out *AllocationPolicyObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]AllocationPolicyLocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyObservation. 
+func (in *AllocationPolicyObservation) DeepCopy() *AllocationPolicyObservation { + if in == nil { + return nil + } + out := new(AllocationPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationPolicyParameters) DeepCopyInto(out *AllocationPolicyParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]AllocationPolicyLocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPolicyParameters. +func (in *AllocationPolicyParameters) DeepCopy() *AllocationPolicyParameters { + if in == nil { + return nil + } + out := new(AllocationPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleInitParameters) DeepCopyInto(out *AutoScaleInitParameters) { + *out = *in + if in.Initial != nil { + in, out := &in.Initial, &out.Initial + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleInitParameters. +func (in *AutoScaleInitParameters) DeepCopy() *AutoScaleInitParameters { + if in == nil { + return nil + } + out := new(AutoScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleObservation) DeepCopyInto(out *AutoScaleObservation) { + *out = *in + if in.Initial != nil { + in, out := &in.Initial, &out.Initial + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleObservation. +func (in *AutoScaleObservation) DeepCopy() *AutoScaleObservation { + if in == nil { + return nil + } + out := new(AutoScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleParameters) DeepCopyInto(out *AutoScaleParameters) { + *out = *in + if in.Initial != nil { + in, out := &in.Initial, &out.Initial + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleParameters. +func (in *AutoScaleParameters) DeepCopy() *AutoScaleParameters { + if in == nil { + return nil + } + out := new(AutoScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
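The functions in this generated file all repeat one mechanical pattern: optional fields are pointers, so DeepCopyInto must allocate a fresh pointee before copying the value; a bare struct assignment would leave the copy aliasing the original. A minimal, self-contained sketch of that pattern outside the generated code (the AutoScale name here is illustrative shorthand for the AutoScale*Parameters types above, not part of the API):

package main

import "fmt"

// AutoScale mirrors the generated shape: optional numeric fields are pointers.
type AutoScale struct {
	Initial *float64
	Max     *float64
}

// DeepCopyInto copies in into out, allocating new pointees so the copy
// shares no memory with the receiver — the same shape controller-gen emits.
func (in *AutoScale) DeepCopyInto(out *AutoScale) {
	*out = *in // copies the pointer values (still aliased)
	if in.Initial != nil {
		v := *in.Initial
		out.Initial = &v // re-point at a fresh allocation
	}
	if in.Max != nil {
		v := *in.Max
		out.Max = &v
	}
}

func main() {
	three := 3.0
	a := AutoScale{Initial: &three}
	var b AutoScale
	a.DeepCopyInto(&b)
	*a.Initial = 5          // mutating the source must not leak into the copy
	fmt.Println(*b.Initial) // prints 3
}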
+func (in *BootDiskInitParameters) DeepCopyInto(out *BootDiskInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskInitParameters. +func (in *BootDiskInitParameters) DeepCopy() *BootDiskInitParameters { + if in == nil { + return nil + } + out := new(BootDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiskObservation) DeepCopyInto(out *BootDiskObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskObservation. +func (in *BootDiskObservation) DeepCopy() *BootDiskObservation { + if in == nil { + return nil + } + out := new(BootDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiskParameters) DeepCopyInto(out *BootDiskParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskParameters. +func (in *BootDiskParameters) DeepCopy() *BootDiskParameters { + if in == nil { + return nil + } + out := new(BootDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumInitParameters) DeepCopyInto(out *CiliumInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumInitParameters. +func (in *CiliumInitParameters) DeepCopy() *CiliumInitParameters { + if in == nil { + return nil + } + out := new(CiliumInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumObservation) DeepCopyInto(out *CiliumObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumObservation. +func (in *CiliumObservation) DeepCopy() *CiliumObservation { + if in == nil { + return nil + } + out := new(CiliumObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumParameters) DeepCopyInto(out *CiliumParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumParameters. +func (in *CiliumParameters) DeepCopy() *CiliumParameters { + if in == nil { + return nil + } + out := new(CiliumParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.ClusterIPv4Range != nil { + in, out := &in.ClusterIPv4Range, &out.ClusterIPv4Range + *out = new(string) + **out = **in + } + if in.ClusterIPv6Range != nil { + in, out := &in.ClusterIPv6Range, &out.ClusterIPv6Range + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSProvider != nil { + in, out := &in.KMSProvider, &out.KMSProvider + *out = make([]KMSProviderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Master != nil { + in, out := &in.Master, &out.Master + *out = make([]MasterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetworkImplementation != nil { + in, out := &in.NetworkImplementation, &out.NetworkImplementation + *out = make([]NetworkImplementationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkPolicyProvider != nil { + in, out := &in.NetworkPolicyProvider, &out.NetworkPolicyProvider + *out = new(string) + **out = **in + } + if in.NodeIPv4CidrMaskSize != nil { + in, out := &in.NodeIPv4CidrMaskSize, &out.NodeIPv4CidrMaskSize + *out = new(float64) + **out = **in + } + if in.NodeServiceAccountID != nil { + in, out := &in.NodeServiceAccountID, &out.NodeServiceAccountID + *out = new(string) + **out = **in + } + if in.NodeServiceAccountIDRef != 
nil { + in, out := &in.NodeServiceAccountIDRef, &out.NodeServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NodeServiceAccountIDSelector != nil { + in, out := &in.NodeServiceAccountIDSelector, &out.NodeServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReleaseChannel != nil { + in, out := &in.ReleaseChannel, &out.ReleaseChannel + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceIPv4Range != nil { + in, out := &in.ServiceIPv4Range, &out.ServiceIPv4Range + *out = new(string) + **out = **in + } + if in.ServiceIPv6Range != nil { + in, out := &in.ServiceIPv6Range, &out.ServiceIPv6Range + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
in must be non-nil.
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.ClusterIPv4Range != nil { + in, out := &in.ClusterIPv4Range, &out.ClusterIPv4Range + *out = new(string) + **out = **in + } + if in.ClusterIPv6Range != nil { + in, out := &in.ClusterIPv6Range, &out.ClusterIPv6Range + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Health != nil { + in, out := &in.Health, &out.Health + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSProvider != nil { + in, out := &in.KMSProvider, &out.KMSProvider + *out = make([]KMSProviderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.Master != nil { + in, out := &in.Master, &out.Master + *out = make([]MasterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkImplementation != nil { + in, out := &in.NetworkImplementation, &out.NetworkImplementation + *out = make([]NetworkImplementationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkPolicyProvider != nil { + in, out := &in.NetworkPolicyProvider, &out.NetworkPolicyProvider + *out = new(string) + **out = **in + } + if in.NodeIPv4CidrMaskSize != nil { + in, out := &in.NodeIPv4CidrMaskSize, &out.NodeIPv4CidrMaskSize + *out = new(float64) + **out = **in + } + if in.NodeServiceAccountID != nil { + in, out := &in.NodeServiceAccountID, &out.NodeServiceAccountID + *out = new(string) + **out = **in + } + if in.ReleaseChannel != nil { + in, out := &in.ReleaseChannel, &out.ReleaseChannel + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceIPv4Range != nil { + in, out := &in.ServiceIPv4Range, &out.ServiceIPv4Range + *out = new(string) + **out = **in + } + if in.ServiceIPv6Range != nil { + in, out := &in.ServiceIPv6Range, &out.ServiceIPv6Range + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.ClusterIPv4Range != nil { + in, out := &in.ClusterIPv4Range, &out.ClusterIPv4Range + *out = new(string) + **out = **in + } + if in.ClusterIPv6Range != nil { + in, out := &in.ClusterIPv6Range, &out.ClusterIPv6Range + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSProvider != nil { + in, out := &in.KMSProvider, &out.KMSProvider + *out = make([]KMSProviderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Master != nil { + in, out := &in.Master, &out.Master + *out = make([]MasterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetworkImplementation != nil { + in, out := &in.NetworkImplementation, &out.NetworkImplementation + *out = make([]NetworkImplementationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkPolicyProvider != nil { + in, out := &in.NetworkPolicyProvider, &out.NetworkPolicyProvider + *out = new(string) + **out = **in + } + if in.NodeIPv4CidrMaskSize != nil { + in, out := &in.NodeIPv4CidrMaskSize, &out.NodeIPv4CidrMaskSize + *out = new(float64) + **out = **in + } + if in.NodeServiceAccountID != nil { + in, out := &in.NodeServiceAccountID, &out.NodeServiceAccountID + *out = new(string) + **out = **in + } + if in.NodeServiceAccountIDRef != nil { + in, out := &in.NodeServiceAccountIDRef, &out.NodeServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NodeServiceAccountIDSelector != nil { + in, out := &in.NodeServiceAccountIDSelector, &out.NodeServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReleaseChannel != nil { + in, out := &in.ReleaseChannel, &out.ReleaseChannel + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := 
&in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceIPv4Range != nil { + in, out := &in.ServiceIPv4Range, &out.ServiceIPv4Range + *out = new(string) + **out = **in + } + if in.ServiceIPv6Range != nil { + in, out := &in.ServiceIPv6Range, &out.ServiceIPv6Range + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNetworkInitParameters) DeepCopyInto(out *ContainerNetworkInitParameters) { + *out = *in + if in.PodMtu != nil { + in, out := &in.PodMtu, &out.PodMtu + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInitParameters. +func (in *ContainerNetworkInitParameters) DeepCopy() *ContainerNetworkInitParameters { + if in == nil { + return nil + } + out := new(ContainerNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNetworkObservation) DeepCopyInto(out *ContainerNetworkObservation) { + *out = *in + if in.PodMtu != nil { + in, out := &in.PodMtu, &out.PodMtu + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkObservation. +func (in *ContainerNetworkObservation) DeepCopy() *ContainerNetworkObservation { + if in == nil { + return nil + } + out := new(ContainerNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNetworkParameters) DeepCopyInto(out *ContainerNetworkParameters) { + *out = *in + if in.PodMtu != nil { + in, out := &in.PodMtu, &out.PodMtu + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkParameters. 
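Maps of string pointers (the Labels and Metadata fields above) need the same per-value treatment: copying the map header, or even looping over keys and assigning values directly, would still alias every *string. A sketch of the loop the generator unrolls inline, pulled out into a helper for readability (the helper name is ours, not part of the generated API):

package main

import "fmt"

// copyStringPtrMap reproduces the generated Labels/Metadata copy:
// a fresh map, and a fresh *string per non-nil value.
func copyStringPtrMap(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		v := *val
		out[key] = &v
	}
	return out
}

func main() {
	env := "prod"
	labels := map[string]*string{"env": &env, "team": nil}
	copied := copyStringPtrMap(labels)
	*labels["env"] = "dev"      // mutate the source map's value
	fmt.Println(*copied["env"]) // still "prod"
}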
+func (in *ContainerNetworkParameters) DeepCopy() *ContainerNetworkParameters { + if in == nil { + return nil + } + out := new(ContainerNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRuntimeInitParameters) DeepCopyInto(out *ContainerRuntimeInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeInitParameters. +func (in *ContainerRuntimeInitParameters) DeepCopy() *ContainerRuntimeInitParameters { + if in == nil { + return nil + } + out := new(ContainerRuntimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRuntimeObservation) DeepCopyInto(out *ContainerRuntimeObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeObservation. +func (in *ContainerRuntimeObservation) DeepCopy() *ContainerRuntimeObservation { + if in == nil { + return nil + } + out := new(ContainerRuntimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRuntimeParameters) DeepCopyInto(out *ContainerRuntimeParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeParameters. +func (in *ContainerRuntimeParameters) DeepCopy() *ContainerRuntimeParameters { + if in == nil { + return nil + } + out := new(ContainerRuntimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeployPolicyInitParameters) DeepCopyInto(out *DeployPolicyInitParameters) { + *out = *in + if in.MaxExpansion != nil { + in, out := &in.MaxExpansion, &out.MaxExpansion + *out = new(float64) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployPolicyInitParameters. +func (in *DeployPolicyInitParameters) DeepCopy() *DeployPolicyInitParameters { + if in == nil { + return nil + } + out := new(DeployPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeployPolicyObservation) DeepCopyInto(out *DeployPolicyObservation) { + *out = *in + if in.MaxExpansion != nil { + in, out := &in.MaxExpansion, &out.MaxExpansion + *out = new(float64) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployPolicyObservation. 
+func (in *DeployPolicyObservation) DeepCopy() *DeployPolicyObservation { + if in == nil { + return nil + } + out := new(DeployPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeployPolicyParameters) DeepCopyInto(out *DeployPolicyParameters) { + *out = *in + if in.MaxExpansion != nil { + in, out := &in.MaxExpansion, &out.MaxExpansion + *out = new(float64) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployPolicyParameters. +func (in *DeployPolicyParameters) DeepCopy() *DeployPolicyParameters { + if in == nil { + return nil + } + out := new(DeployPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedScaleInitParameters) DeepCopyInto(out *FixedScaleInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleInitParameters. +func (in *FixedScaleInitParameters) DeepCopy() *FixedScaleInitParameters { + if in == nil { + return nil + } + out := new(FixedScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedScaleObservation) DeepCopyInto(out *FixedScaleObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleObservation. +func (in *FixedScaleObservation) DeepCopy() *FixedScaleObservation { + if in == nil { + return nil + } + out := new(FixedScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedScaleParameters) DeepCopyInto(out *FixedScaleParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleParameters. +func (in *FixedScaleParameters) DeepCopy() *FixedScaleParameters { + if in == nil { + return nil + } + out := new(FixedScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuSettingsInitParameters) DeepCopyInto(out *GpuSettingsInitParameters) { + *out = *in + if in.GpuClusterID != nil { + in, out := &in.GpuClusterID, &out.GpuClusterID + *out = new(string) + **out = **in + } + if in.GpuEnvironment != nil { + in, out := &in.GpuEnvironment, &out.GpuEnvironment + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuSettingsInitParameters. 
+func (in *GpuSettingsInitParameters) DeepCopy() *GpuSettingsInitParameters { + if in == nil { + return nil + } + out := new(GpuSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuSettingsObservation) DeepCopyInto(out *GpuSettingsObservation) { + *out = *in + if in.GpuClusterID != nil { + in, out := &in.GpuClusterID, &out.GpuClusterID + *out = new(string) + **out = **in + } + if in.GpuEnvironment != nil { + in, out := &in.GpuEnvironment, &out.GpuEnvironment + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuSettingsObservation. +func (in *GpuSettingsObservation) DeepCopy() *GpuSettingsObservation { + if in == nil { + return nil + } + out := new(GpuSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuSettingsParameters) DeepCopyInto(out *GpuSettingsParameters) { + *out = *in + if in.GpuClusterID != nil { + in, out := &in.GpuClusterID, &out.GpuClusterID + *out = new(string) + **out = **in + } + if in.GpuEnvironment != nil { + in, out := &in.GpuEnvironment, &out.GpuEnvironment + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuSettingsParameters. +func (in *GpuSettingsParameters) DeepCopy() *GpuSettingsParameters { + if in == nil { + return nil + } + out := new(GpuSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4DNSRecordsInitParameters) DeepCopyInto(out *IPv4DNSRecordsInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4DNSRecordsInitParameters. +func (in *IPv4DNSRecordsInitParameters) DeepCopy() *IPv4DNSRecordsInitParameters { + if in == nil { + return nil + } + out := new(IPv4DNSRecordsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4DNSRecordsObservation) DeepCopyInto(out *IPv4DNSRecordsObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4DNSRecordsObservation. 
+func (in *IPv4DNSRecordsObservation) DeepCopy() *IPv4DNSRecordsObservation { + if in == nil { + return nil + } + out := new(IPv4DNSRecordsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4DNSRecordsParameters) DeepCopyInto(out *IPv4DNSRecordsParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4DNSRecordsParameters. +func (in *IPv4DNSRecordsParameters) DeepCopy() *IPv4DNSRecordsParameters { + if in == nil { + return nil + } + out := new(IPv4DNSRecordsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6DNSRecordsInitParameters) DeepCopyInto(out *IPv6DNSRecordsInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordsInitParameters. +func (in *IPv6DNSRecordsInitParameters) DeepCopy() *IPv6DNSRecordsInitParameters { + if in == nil { + return nil + } + out := new(IPv6DNSRecordsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6DNSRecordsObservation) DeepCopyInto(out *IPv6DNSRecordsObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordsObservation. +func (in *IPv6DNSRecordsObservation) DeepCopy() *IPv6DNSRecordsObservation { + if in == nil { + return nil + } + out := new(IPv6DNSRecordsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv6DNSRecordsParameters) DeepCopyInto(out *IPv6DNSRecordsParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordsParameters. +func (in *IPv6DNSRecordsParameters) DeepCopy() *IPv6DNSRecordsParameters { + if in == nil { + return nil + } + out := new(IPv6DNSRecordsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceTemplateInitParameters) DeepCopyInto(out *InstanceTemplateInitParameters) { + *out = *in + if in.BootDisk != nil { + in, out := &in.BootDisk, &out.BootDisk + *out = make([]BootDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerNetwork != nil { + in, out := &in.ContainerNetwork, &out.ContainerNetwork + *out = make([]ContainerNetworkInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRuntime != nil { + in, out := &in.ContainerRuntime, &out.ContainerRuntime + *out = make([]ContainerRuntimeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GpuSettings != nil { + in, out := &in.GpuSettings, &out.GpuSettings + *out = make([]GpuSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkAccelerationType != nil { + in, out := &in.NetworkAccelerationType, &out.NetworkAccelerationType + *out = new(string) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementPolicy != nil { + in, out := &in.PlacementPolicy, &out.PlacementPolicy + *out = make([]PlacementPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + 
} + if in.SchedulingPolicy != nil { + in, out := &in.SchedulingPolicy, &out.SchedulingPolicy + *out = make([]SchedulingPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateInitParameters. +func (in *InstanceTemplateInitParameters) DeepCopy() *InstanceTemplateInitParameters { + if in == nil { + return nil + } + out := new(InstanceTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceTemplateObservation) DeepCopyInto(out *InstanceTemplateObservation) { + *out = *in + if in.BootDisk != nil { + in, out := &in.BootDisk, &out.BootDisk + *out = make([]BootDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerNetwork != nil { + in, out := &in.ContainerNetwork, &out.ContainerNetwork + *out = make([]ContainerNetworkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRuntime != nil { + in, out := &in.ContainerRuntime, &out.ContainerRuntime + *out = make([]ContainerRuntimeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GpuSettings != nil { + in, out := &in.GpuSettings, &out.GpuSettings + *out = make([]GpuSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkAccelerationType != nil { + in, out := &in.NetworkAccelerationType, &out.NetworkAccelerationType + *out = new(string) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementPolicy != nil { + in, out := &in.PlacementPolicy, &out.PlacementPolicy + *out = make([]PlacementPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchedulingPolicy != nil { + in, out := &in.SchedulingPolicy, &out.SchedulingPolicy + *out = make([]SchedulingPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new InstanceTemplateObservation. +func (in *InstanceTemplateObservation) DeepCopy() *InstanceTemplateObservation { + if in == nil { + return nil + } + out := new(InstanceTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceTemplateParameters) DeepCopyInto(out *InstanceTemplateParameters) { + *out = *in + if in.BootDisk != nil { + in, out := &in.BootDisk, &out.BootDisk + *out = make([]BootDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerNetwork != nil { + in, out := &in.ContainerNetwork, &out.ContainerNetwork + *out = make([]ContainerNetworkParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRuntime != nil { + in, out := &in.ContainerRuntime, &out.ContainerRuntime + *out = make([]ContainerRuntimeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GpuSettings != nil { + in, out := &in.GpuSettings, &out.GpuSettings + *out = make([]GpuSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkAccelerationType != nil { + in, out := &in.NetworkAccelerationType, &out.NetworkAccelerationType + *out = new(string) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementPolicy != nil { + in, out := &in.PlacementPolicy, &out.PlacementPolicy + *out = make([]PlacementPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchedulingPolicy != nil { + in, out := &in.SchedulingPolicy, &out.SchedulingPolicy + *out = make([]SchedulingPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceTemplateParameters. 
+func (in *InstanceTemplateParameters) DeepCopy() *InstanceTemplateParameters { + if in == nil { + return nil + } + out := new(InstanceTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSProviderInitParameters) DeepCopyInto(out *KMSProviderInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSProviderInitParameters. +func (in *KMSProviderInitParameters) DeepCopy() *KMSProviderInitParameters { + if in == nil { + return nil + } + out := new(KMSProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSProviderObservation) DeepCopyInto(out *KMSProviderObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSProviderObservation. +func (in *KMSProviderObservation) DeepCopy() *KMSProviderObservation { + if in == nil { + return nil + } + out := new(KMSProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSProviderParameters) DeepCopyInto(out *KMSProviderParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSProviderParameters. +func (in *KMSProviderParameters) DeepCopy() *KMSProviderParameters { + if in == nil { + return nil + } + out := new(KMSProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
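Cross-resource reference fields (KeyIDRef, SubnetIDRef, and the *Selector variants) are crossplane-runtime types with their own generated DeepCopyInto, so the code above delegates to them rather than re-implementing the copy. A hedged sketch of that delegation, assuming crossplane-runtime is on the module path and using the same v1 import as this file (the KMSProvider struct here is a trimmed stand-in for the generated parameter block):

package main

import (
	"fmt"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

// KMSProvider mirrors the generated block: the resolved ID plus an
// optional reference consumed by Crossplane's resolver machinery.
type KMSProvider struct {
	KeyID    *string
	KeyIDRef *v1.Reference
}

func (in *KMSProvider) DeepCopyInto(out *KMSProvider) {
	*out = *in
	if in.KeyID != nil {
		v := *in.KeyID
		out.KeyID = &v
	}
	if in.KeyIDRef != nil {
		out.KeyIDRef = new(v1.Reference)
		// Delegate to crossplane-runtime's own generated deepcopy.
		in.KeyIDRef.DeepCopyInto(out.KeyIDRef)
	}
}

func main() {
	a := KMSProvider{KeyIDRef: &v1.Reference{Name: "my-kms-key"}}
	var b KMSProvider
	a.DeepCopyInto(&b)
	a.KeyIDRef.Name = "changed"
	fmt.Println(b.KeyIDRef.Name) // my-kms-key
}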
+func (in *LocationInitParameters) DeepCopyInto(out *LocationInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.ZoneRef != nil { + in, out := &in.ZoneRef, &out.ZoneRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneSelector != nil { + in, out := &in.ZoneSelector, &out.ZoneSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationInitParameters. +func (in *LocationInitParameters) DeepCopy() *LocationInitParameters { + if in == nil { + return nil + } + out := new(LocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationObservation) DeepCopyInto(out *LocationObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationObservation. +func (in *LocationObservation) DeepCopy() *LocationObservation { + if in == nil { + return nil + } + out := new(LocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationParameters) DeepCopyInto(out *LocationParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.ZoneRef != nil { + in, out := &in.ZoneRef, &out.ZoneRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneSelector != nil { + in, out := &in.ZoneSelector, &out.ZoneSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationParameters. +func (in *LocationParameters) DeepCopy() *LocationParameters { + if in == nil { + return nil + } + out := new(LocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenancePolicyInitParameters) DeepCopyInto(out *MaintenancePolicyInitParameters) { + *out = *in + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(bool) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyInitParameters. +func (in *MaintenancePolicyInitParameters) DeepCopy() *MaintenancePolicyInitParameters { + if in == nil { + return nil + } + out := new(MaintenancePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyMaintenanceWindowInitParameters) DeepCopyInto(out *MaintenancePolicyMaintenanceWindowInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyMaintenanceWindowInitParameters. +func (in *MaintenancePolicyMaintenanceWindowInitParameters) DeepCopy() *MaintenancePolicyMaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(MaintenancePolicyMaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyMaintenanceWindowObservation) DeepCopyInto(out *MaintenancePolicyMaintenanceWindowObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyMaintenanceWindowObservation. +func (in *MaintenancePolicyMaintenanceWindowObservation) DeepCopy() *MaintenancePolicyMaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(MaintenancePolicyMaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyMaintenanceWindowParameters) DeepCopyInto(out *MaintenancePolicyMaintenanceWindowParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyMaintenanceWindowParameters. 
+func (in *MaintenancePolicyMaintenanceWindowParameters) DeepCopy() *MaintenancePolicyMaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(MaintenancePolicyMaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyObservation) DeepCopyInto(out *MaintenancePolicyObservation) { + *out = *in + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(bool) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenanceWindowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyObservation. +func (in *MaintenancePolicyObservation) DeepCopy() *MaintenancePolicyObservation { + if in == nil { + return nil + } + out := new(MaintenancePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenancePolicyParameters) DeepCopyInto(out *MaintenancePolicyParameters) { + *out = *in + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(bool) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenanceWindowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenancePolicyParameters. +func (in *MaintenancePolicyParameters) DeepCopy() *MaintenancePolicyParameters { + if in == nil { + return nil + } + out := new(MaintenancePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowInitParameters) DeepCopyInto(out *MaintenanceWindowInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowInitParameters. +func (in *MaintenanceWindowInitParameters) DeepCopy() *MaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowObservation) DeepCopyInto(out *MaintenanceWindowObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowObservation. 
+func (in *MaintenanceWindowObservation) DeepCopy() *MaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(MaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowParameters) DeepCopyInto(out *MaintenanceWindowParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowParameters. +func (in *MaintenanceWindowParameters) DeepCopy() *MaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterInitParameters) DeepCopyInto(out *MasterInitParameters) { + *out = *in + if in.EtcdClusterSize != nil { + in, out := &in.EtcdClusterSize, &out.EtcdClusterSize + *out = new(float64) + **out = **in + } + if in.ExternalV6Address != nil { + in, out := &in.ExternalV6Address, &out.ExternalV6Address + *out = new(string) + **out = **in + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = make([]MaintenancePolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterLocation != nil { + in, out := &in.MasterLocation, &out.MasterLocation + *out = make([]MasterLocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterLogging != nil { + in, out := &in.MasterLogging, &out.MasterLogging + *out = make([]MasterLoggingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in + } + if in.Regional != nil { + in, out := &in.Regional, &out.Regional + *out = make([]RegionalInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zonal != nil { + in, out := &in.Zonal, &out.Zonal + *out = make([]ZonalInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterInitParameters. 
+func (in *MasterInitParameters) DeepCopy() *MasterInitParameters { + if in == nil { + return nil + } + out := new(MasterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterLocationInitParameters) DeepCopyInto(out *MasterLocationInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterLocationInitParameters. +func (in *MasterLocationInitParameters) DeepCopy() *MasterLocationInitParameters { + if in == nil { + return nil + } + out := new(MasterLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterLocationObservation) DeepCopyInto(out *MasterLocationObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterLocationObservation. +func (in *MasterLocationObservation) DeepCopy() *MasterLocationObservation { + if in == nil { + return nil + } + out := new(MasterLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterLocationParameters) DeepCopyInto(out *MasterLocationParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterLocationParameters. +func (in *MasterLocationParameters) DeepCopy() *MasterLocationParameters { + if in == nil { + return nil + } + out := new(MasterLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MasterLoggingInitParameters) DeepCopyInto(out *MasterLoggingInitParameters) { + *out = *in + if in.AuditEnabled != nil { + in, out := &in.AuditEnabled, &out.AuditEnabled + *out = new(bool) + **out = **in + } + if in.ClusterAutoscalerEnabled != nil { + in, out := &in.ClusterAutoscalerEnabled, &out.ClusterAutoscalerEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventsEnabled != nil { + in, out := &in.EventsEnabled, &out.EventsEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.KubeApiserverEnabled != nil { + in, out := &in.KubeApiserverEnabled, &out.KubeApiserverEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.LogGroupIDRef != nil { + in, out := &in.LogGroupIDRef, &out.LogGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupIDSelector != nil { + in, out := &in.LogGroupIDSelector, &out.LogGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterLoggingInitParameters. +func (in *MasterLoggingInitParameters) DeepCopy() *MasterLoggingInitParameters { + if in == nil { + return nil + } + out := new(MasterLoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterLoggingObservation) DeepCopyInto(out *MasterLoggingObservation) { + *out = *in + if in.AuditEnabled != nil { + in, out := &in.AuditEnabled, &out.AuditEnabled + *out = new(bool) + **out = **in + } + if in.ClusterAutoscalerEnabled != nil { + in, out := &in.ClusterAutoscalerEnabled, &out.ClusterAutoscalerEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventsEnabled != nil { + in, out := &in.EventsEnabled, &out.EventsEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.KubeApiserverEnabled != nil { + in, out := &in.KubeApiserverEnabled, &out.KubeApiserverEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterLoggingObservation. +func (in *MasterLoggingObservation) DeepCopy() *MasterLoggingObservation { + if in == nil { + return nil + } + out := new(MasterLoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MasterLoggingParameters) DeepCopyInto(out *MasterLoggingParameters) { + *out = *in + if in.AuditEnabled != nil { + in, out := &in.AuditEnabled, &out.AuditEnabled + *out = new(bool) + **out = **in + } + if in.ClusterAutoscalerEnabled != nil { + in, out := &in.ClusterAutoscalerEnabled, &out.ClusterAutoscalerEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventsEnabled != nil { + in, out := &in.EventsEnabled, &out.EventsEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.KubeApiserverEnabled != nil { + in, out := &in.KubeApiserverEnabled, &out.KubeApiserverEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.LogGroupIDRef != nil { + in, out := &in.LogGroupIDRef, &out.LogGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupIDSelector != nil { + in, out := &in.LogGroupIDSelector, &out.LogGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterLoggingParameters. +func (in *MasterLoggingParameters) DeepCopy() *MasterLoggingParameters { + if in == nil { + return nil + } + out := new(MasterLoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterObservation) DeepCopyInto(out *MasterObservation) { + *out = *in + if in.ClusterCACertificate != nil { + in, out := &in.ClusterCACertificate, &out.ClusterCACertificate + *out = new(string) + **out = **in + } + if in.EtcdClusterSize != nil { + in, out := &in.EtcdClusterSize, &out.EtcdClusterSize + *out = new(float64) + **out = **in + } + if in.ExternalV4Address != nil { + in, out := &in.ExternalV4Address, &out.ExternalV4Address + *out = new(string) + **out = **in + } + if in.ExternalV4Endpoint != nil { + in, out := &in.ExternalV4Endpoint, &out.ExternalV4Endpoint + *out = new(string) + **out = **in + } + if in.ExternalV6Address != nil { + in, out := &in.ExternalV6Address, &out.ExternalV6Address + *out = new(string) + **out = **in + } + if in.ExternalV6Endpoint != nil { + in, out := &in.ExternalV6Endpoint, &out.ExternalV6Endpoint + *out = new(string) + **out = **in + } + if in.InternalV4Address != nil { + in, out := &in.InternalV4Address, &out.InternalV4Address + *out = new(string) + **out = **in + } + if in.InternalV4Endpoint != nil { + in, out := &in.InternalV4Endpoint, &out.InternalV4Endpoint + *out = new(string) + **out = **in + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = make([]MaintenancePolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterLocation != nil { + in, out := &in.MasterLocation, &out.MasterLocation + *out = make([]MasterLocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterLogging != nil { + in, out := &in.MasterLogging, &out.MasterLogging + *out = make([]MasterLoggingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in 
+ } + if in.Regional != nil { + in, out := &in.Regional, &out.Regional + *out = make([]RegionalObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionInfo != nil { + in, out := &in.VersionInfo, &out.VersionInfo + *out = make([]VersionInfoObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zonal != nil { + in, out := &in.Zonal, &out.Zonal + *out = make([]ZonalObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterObservation. +func (in *MasterObservation) DeepCopy() *MasterObservation { + if in == nil { + return nil + } + out := new(MasterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterParameters) DeepCopyInto(out *MasterParameters) { + *out = *in + if in.EtcdClusterSize != nil { + in, out := &in.EtcdClusterSize, &out.EtcdClusterSize + *out = new(float64) + **out = **in + } + if in.ExternalV6Address != nil { + in, out := &in.ExternalV6Address, &out.ExternalV6Address + *out = new(string) + **out = **in + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = make([]MaintenancePolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterLocation != nil { + in, out := &in.MasterLocation, &out.MasterLocation + *out = make([]MasterLocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterLogging != nil { + in, out := &in.MasterLogging, &out.MasterLogging + *out = make([]MasterLoggingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in + } + if in.Regional != nil { + in, out := &in.Regional, &out.Regional + *out = make([]RegionalParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zonal != nil { + in, out := &in.Zonal, &out.Zonal + *out = make([]ZonalParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterParameters.
+func (in *MasterParameters) DeepCopy() *MasterParameters { + if in == nil { + return nil + } + out := new(MasterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkImplementationInitParameters) DeepCopyInto(out *NetworkImplementationInitParameters) { + *out = *in + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = make([]CiliumInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkImplementationInitParameters. +func (in *NetworkImplementationInitParameters) DeepCopy() *NetworkImplementationInitParameters { + if in == nil { + return nil + } + out := new(NetworkImplementationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkImplementationObservation) DeepCopyInto(out *NetworkImplementationObservation) { + *out = *in + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = make([]CiliumParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkImplementationObservation. +func (in *NetworkImplementationObservation) DeepCopy() *NetworkImplementationObservation { + if in == nil { + return nil + } + out := new(NetworkImplementationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkImplementationParameters) DeepCopyInto(out *NetworkImplementationParameters) { + *out = *in + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = make([]CiliumParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkImplementationParameters. +func (in *NetworkImplementationParameters) DeepCopy() *NetworkImplementationParameters { + if in == nil { + return nil + } + out := new(NetworkImplementationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkInterfaceInitParameters) DeepCopyInto(out *NetworkInterfaceInitParameters) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv4DNSRecords != nil { + in, out := &in.IPv4DNSRecords, &out.IPv4DNSRecords + *out = make([]IPv4DNSRecordsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6DNSRecords != nil { + in, out := &in.IPv6DNSRecords, &out.IPv6DNSRecords + *out = make([]IPv6DNSRecordsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceInitParameters. +func (in *NetworkInterfaceInitParameters) DeepCopy() *NetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceObservation) DeepCopyInto(out *NetworkInterfaceObservation) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv4DNSRecords != nil { + in, out := &in.IPv4DNSRecords, &out.IPv4DNSRecords + *out = make([]IPv4DNSRecordsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6DNSRecords != nil { + in, out := &in.IPv6DNSRecords, &out.IPv6DNSRecords + *out = make([]IPv6DNSRecordsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceObservation. +func (in *NetworkInterfaceObservation) DeepCopy() *NetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceParameters) DeepCopyInto(out *NetworkInterfaceParameters) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv4DNSRecords != nil { + in, out := &in.IPv4DNSRecords, &out.IPv4DNSRecords + *out = make([]IPv4DNSRecordsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6DNSRecords != nil { + in, out := &in.IPv6DNSRecords, &out.IPv6DNSRecords + *out = make([]IPv6DNSRecordsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } 
+ } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceParameters. +func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroup) DeepCopyInto(out *NodeGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroup. +func (in *NodeGroup) DeepCopy() *NodeGroup { + if in == nil { + return nil + } + out := new(NodeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupInitParameters) DeepCopyInto(out *NodeGroupInitParameters) { + *out = *in + if in.AllocationPolicy != nil { + in, out := &in.AllocationPolicy, &out.AllocationPolicy + *out = make([]AllocationPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ClusterIDRef != nil { + in, out := &in.ClusterIDRef, &out.ClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterIDSelector != nil { + in, out := &in.ClusterIDSelector, &out.ClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeployPolicy != nil { + in, out := &in.DeployPolicy, &out.DeployPolicy + *out = make([]DeployPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceTemplate != nil { + in, out := &in.InstanceTemplate, &out.InstanceTemplate + *out = make([]InstanceTemplateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = make([]NodeGroupMaintenancePolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in
+ } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupInitParameters. +func (in *NodeGroupInitParameters) DeepCopy() *NodeGroupInitParameters { + if in == nil { + return nil + } + out := new(NodeGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupList) DeepCopyInto(out *NodeGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupList. +func (in *NodeGroupList) DeepCopy() *NodeGroupList { + if in == nil { + return nil + } + out := new(NodeGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupMaintenancePolicyInitParameters) DeepCopyInto(out *NodeGroupMaintenancePolicyInitParameters) { + *out = *in + if in.AutoRepair != nil { + in, out := &in.AutoRepair, &out.AutoRepair + *out = new(bool) + **out = **in + } + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(bool) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenancePolicyMaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupMaintenancePolicyInitParameters. +func (in *NodeGroupMaintenancePolicyInitParameters) DeepCopy() *NodeGroupMaintenancePolicyInitParameters { + if in == nil { + return nil + } + out := new(NodeGroupMaintenancePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeGroupMaintenancePolicyObservation) DeepCopyInto(out *NodeGroupMaintenancePolicyObservation) { + *out = *in + if in.AutoRepair != nil { + in, out := &in.AutoRepair, &out.AutoRepair + *out = new(bool) + **out = **in + } + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(bool) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenancePolicyMaintenanceWindowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupMaintenancePolicyObservation. +func (in *NodeGroupMaintenancePolicyObservation) DeepCopy() *NodeGroupMaintenancePolicyObservation { + if in == nil { + return nil + } + out := new(NodeGroupMaintenancePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupMaintenancePolicyParameters) DeepCopyInto(out *NodeGroupMaintenancePolicyParameters) { + *out = *in + if in.AutoRepair != nil { + in, out := &in.AutoRepair, &out.AutoRepair + *out = new(bool) + **out = **in + } + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(bool) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenancePolicyMaintenanceWindowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupMaintenancePolicyParameters. +func (in *NodeGroupMaintenancePolicyParameters) DeepCopy() *NodeGroupMaintenancePolicyParameters { + if in == nil { + return nil + } + out := new(NodeGroupMaintenancePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeGroupObservation) DeepCopyInto(out *NodeGroupObservation) { + *out = *in + if in.AllocationPolicy != nil { + in, out := &in.AllocationPolicy, &out.AllocationPolicy + *out = make([]AllocationPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeployPolicy != nil { + in, out := &in.DeployPolicy, &out.DeployPolicy + *out = make([]DeployPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceGroupID != nil { + in, out := &in.InstanceGroupID, &out.InstanceGroupID + *out = new(string) + **out = **in + } + if in.InstanceTemplate != nil { + in, out := &in.InstanceTemplate, &out.InstanceTemplate + *out = make([]InstanceTemplateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = make([]NodeGroupMaintenancePolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionInfo != nil { + in, out := &in.VersionInfo, &out.VersionInfo + *out = make([]NodeGroupVersionInfoObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupObservation. 
+func (in *NodeGroupObservation) DeepCopy() *NodeGroupObservation { + if in == nil { + return nil + } + out := new(NodeGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupParameters) DeepCopyInto(out *NodeGroupParameters) { + *out = *in + if in.AllocationPolicy != nil { + in, out := &in.AllocationPolicy, &out.AllocationPolicy + *out = make([]AllocationPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ClusterIDRef != nil { + in, out := &in.ClusterIDRef, &out.ClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterIDSelector != nil { + in, out := &in.ClusterIDSelector, &out.ClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeployPolicy != nil { + in, out := &in.DeployPolicy, &out.DeployPolicy + *out = make([]DeployPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceTemplate != nil { + in, out := &in.InstanceTemplate, &out.InstanceTemplate + *out = make([]InstanceTemplateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenancePolicy != nil { + in, out := &in.MaintenancePolicy, &out.MaintenancePolicy + *out = make([]NodeGroupMaintenancePolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupParameters. 
+func (in *NodeGroupParameters) DeepCopy() *NodeGroupParameters { + if in == nil { + return nil + } + out := new(NodeGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupSpec) DeepCopyInto(out *NodeGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupSpec. +func (in *NodeGroupSpec) DeepCopy() *NodeGroupSpec { + if in == nil { + return nil + } + out := new(NodeGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupStatus) DeepCopyInto(out *NodeGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupStatus. +func (in *NodeGroupStatus) DeepCopy() *NodeGroupStatus { + if in == nil { + return nil + } + out := new(NodeGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupVersionInfoInitParameters) DeepCopyInto(out *NodeGroupVersionInfoInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupVersionInfoInitParameters. +func (in *NodeGroupVersionInfoInitParameters) DeepCopy() *NodeGroupVersionInfoInitParameters { + if in == nil { + return nil + } + out := new(NodeGroupVersionInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupVersionInfoObservation) DeepCopyInto(out *NodeGroupVersionInfoObservation) { + *out = *in + if in.CurrentVersion != nil { + in, out := &in.CurrentVersion, &out.CurrentVersion + *out = new(string) + **out = **in + } + if in.NewRevisionAvailable != nil { + in, out := &in.NewRevisionAvailable, &out.NewRevisionAvailable + *out = new(bool) + **out = **in + } + if in.NewRevisionSummary != nil { + in, out := &in.NewRevisionSummary, &out.NewRevisionSummary + *out = new(string) + **out = **in + } + if in.VersionDeprecated != nil { + in, out := &in.VersionDeprecated, &out.VersionDeprecated + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupVersionInfoObservation. +func (in *NodeGroupVersionInfoObservation) DeepCopy() *NodeGroupVersionInfoObservation { + if in == nil { + return nil + } + out := new(NodeGroupVersionInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupVersionInfoParameters) DeepCopyInto(out *NodeGroupVersionInfoParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupVersionInfoParameters. 
+func (in *NodeGroupVersionInfoParameters) DeepCopy() *NodeGroupVersionInfoParameters { + if in == nil { + return nil + } + out := new(NodeGroupVersionInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementPolicyInitParameters) DeepCopyInto(out *PlacementPolicyInitParameters) { + *out = *in + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyInitParameters. +func (in *PlacementPolicyInitParameters) DeepCopy() *PlacementPolicyInitParameters { + if in == nil { + return nil + } + out := new(PlacementPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementPolicyObservation) DeepCopyInto(out *PlacementPolicyObservation) { + *out = *in + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyObservation. +func (in *PlacementPolicyObservation) DeepCopy() *PlacementPolicyObservation { + if in == nil { + return nil + } + out := new(PlacementPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementPolicyParameters) DeepCopyInto(out *PlacementPolicyParameters) { + *out = *in + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyParameters. +func (in *PlacementPolicyParameters) DeepCopy() *PlacementPolicyParameters { + if in == nil { + return nil + } + out := new(PlacementPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionalInitParameters) DeepCopyInto(out *RegionalInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionalInitParameters. +func (in *RegionalInitParameters) DeepCopy() *RegionalInitParameters { + if in == nil { + return nil + } + out := new(RegionalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionalObservation) DeepCopyInto(out *RegionalObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionalObservation. +func (in *RegionalObservation) DeepCopy() *RegionalObservation { + if in == nil { + return nil + } + out := new(RegionalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionalParameters) DeepCopyInto(out *RegionalParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionalParameters. +func (in *RegionalParameters) DeepCopy() *RegionalParameters { + if in == nil { + return nil + } + out := new(RegionalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Gpus != nil { + in, out := &in.Gpus, &out.Gpus + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Gpus != nil { + in, out := &in.Gpus, &out.Gpus + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Gpus != nil { + in, out := &in.Gpus, &out.Gpus + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyInitParameters) DeepCopyInto(out *ScalePolicyInitParameters) { + *out = *in + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = make([]AutoScaleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyInitParameters. +func (in *ScalePolicyInitParameters) DeepCopy() *ScalePolicyInitParameters { + if in == nil { + return nil + } + out := new(ScalePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyObservation) DeepCopyInto(out *ScalePolicyObservation) { + *out = *in + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = make([]AutoScaleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyObservation. +func (in *ScalePolicyObservation) DeepCopy() *ScalePolicyObservation { + if in == nil { + return nil + } + out := new(ScalePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyParameters) DeepCopyInto(out *ScalePolicyParameters) { + *out = *in + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = make([]AutoScaleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyParameters. 
+func (in *ScalePolicyParameters) DeepCopy() *ScalePolicyParameters { + if in == nil { + return nil + } + out := new(ScalePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyInitParameters) DeepCopyInto(out *SchedulingPolicyInitParameters) { + *out = *in + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyInitParameters. +func (in *SchedulingPolicyInitParameters) DeepCopy() *SchedulingPolicyInitParameters { + if in == nil { + return nil + } + out := new(SchedulingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyObservation) DeepCopyInto(out *SchedulingPolicyObservation) { + *out = *in + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyObservation. +func (in *SchedulingPolicyObservation) DeepCopy() *SchedulingPolicyObservation { + if in == nil { + return nil + } + out := new(SchedulingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyParameters) DeepCopyInto(out *SchedulingPolicyParameters) { + *out = *in + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyParameters. +func (in *SchedulingPolicyParameters) DeepCopy() *SchedulingPolicyParameters { + if in == nil { + return nil + } + out := new(SchedulingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionInfoInitParameters) DeepCopyInto(out *VersionInfoInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionInfoInitParameters. +func (in *VersionInfoInitParameters) DeepCopy() *VersionInfoInitParameters { + if in == nil { + return nil + } + out := new(VersionInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionInfoObservation) DeepCopyInto(out *VersionInfoObservation) { + *out = *in + if in.CurrentVersion != nil { + in, out := &in.CurrentVersion, &out.CurrentVersion + *out = new(string) + **out = **in + } + if in.NewRevisionAvailable != nil { + in, out := &in.NewRevisionAvailable, &out.NewRevisionAvailable + *out = new(bool) + **out = **in + } + if in.NewRevisionSummary != nil { + in, out := &in.NewRevisionSummary, &out.NewRevisionSummary + *out = new(string) + **out = **in + } + if in.VersionDeprecated != nil { + in, out := &in.VersionDeprecated, &out.VersionDeprecated + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionInfoObservation. 
+func (in *VersionInfoObservation) DeepCopy() *VersionInfoObservation { + if in == nil { + return nil + } + out := new(VersionInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionInfoParameters) DeepCopyInto(out *VersionInfoParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionInfoParameters. +func (in *VersionInfoParameters) DeepCopy() *VersionInfoParameters { + if in == nil { + return nil + } + out := new(VersionInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZonalInitParameters) DeepCopyInto(out *ZonalInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.ZoneRef != nil { + in, out := &in.ZoneRef, &out.ZoneRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneSelector != nil { + in, out := &in.ZoneSelector, &out.ZoneSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZonalInitParameters. +func (in *ZonalInitParameters) DeepCopy() *ZonalInitParameters { + if in == nil { + return nil + } + out := new(ZonalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZonalObservation) DeepCopyInto(out *ZonalObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZonalObservation. +func (in *ZonalObservation) DeepCopy() *ZonalObservation { + if in == nil { + return nil + } + out := new(ZonalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
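// Note the asymmetry in the Zonal copies above: plain *string fields are
// copied with **out = **in, but *v1.Reference and *v1.Selector fields are
// copied through their own DeepCopyInto, because those structs carry nested
// pointers and maps of their own. A sketch of the distinction
// (copyReferenceSketch is illustrative, not generated code):

func copyReferenceSketch(in *v1.Reference) *v1.Reference {
	out := new(v1.Reference)
	// A bare `*out = *in` would share any nested pointers (such as the
	// resolution policy) between the two copies; DeepCopyInto recurses instead.
	in.DeepCopyInto(out)
	return out
}

// The generated ZonalParameters copy follows.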
+func (in *ZonalParameters) DeepCopyInto(out *ZonalParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.ZoneRef != nil { + in, out := &in.ZoneRef, &out.ZoneRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneSelector != nil { + in, out := &in.ZoneSelector, &out.ZoneSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZonalParameters. +func (in *ZonalParameters) DeepCopy() *ZonalParameters { + if in == nil { + return nil + } + out := new(ZonalParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kubernetes/v1alpha1/zz_generated.resolvers.go b/apis/kubernetes/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..859f691 --- /dev/null +++ b/apis/kubernetes/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,638 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + v1alpha13 "github.com/tagesjump/provider-upjet-yc/apis/logging/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Cluster. 
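// ResolveReferences walks every Ref/Selector pair in the spec and rewrites the
// target field with a concrete value before the resource is reconciled: a
// Reference names one managed resource, a Selector matches by labels, and the
// Extract function picks which value to read from the referenced object (its
// external name, its resource ID via resource.ExtractResourceID, or a spec
// field via resource.ExtractParamPath, e.g. "zone" from a Subnet). A sketch of
// how crossplane-runtime's managed reconciler invokes it (resolveClusterSketch
// is illustrative, not generated code):

func resolveClusterSketch(ctx context.Context, kube client.Reader, mg *Cluster) error {
	// Fills e.g. spec.forProvider.networkId from spec.forProvider.networkIdRef.
	return mg.ResolveReferences(ctx, kube)
}

// The generated resolver for Cluster follows.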
+func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.KMSProvider); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSProvider[i3].KeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSProvider[i3].KeyIDRef, + Selector: mg.Spec.ForProvider.KMSProvider[i3].KeyIDSelector, + To: reference.To{ + List: &v1alpha11.SymmetricKeyList{}, + Managed: &v1alpha11.SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSProvider[i3].KeyID") + } + mg.Spec.ForProvider.KMSProvider[i3].KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSProvider[i3].KeyIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Master[i3].MasterLocation); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Master[i3].MasterLocation[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Master[i3].MasterLocation[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.Master[i3].MasterLocation[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].MasterLocation[i4].SubnetID") + } + mg.Spec.ForProvider.Master[i3].MasterLocation[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Master[i3].MasterLocation[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Master[i3].MasterLogging); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Master[i3].MasterLogging[i4].LogGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Master[i3].MasterLogging[i4].LogGroupIDRef, + Selector: mg.Spec.ForProvider.Master[i3].MasterLogging[i4].LogGroupIDSelector, + To: reference.To{ + List: &v1alpha13.GroupList{}, + Managed: &v1alpha13.Group{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].MasterLogging[i4].LogGroupID") + } + mg.Spec.ForProvider.Master[i3].MasterLogging[i4].LogGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Master[i3].MasterLogging[i4].LogGroupIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Master[i3].Regional); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Master[i3].Regional[i4].Location); i5++ { + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].SubnetIDRef, + Selector: mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].SubnetID") + } + mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].SubnetIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Master[i3].Regional); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Master[i3].Regional[i4].Location); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].Zone), + Extract: resource.ExtractParamPath("zone", false), + Reference: mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].ZoneRef, + Selector: mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].ZoneSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].Zone") + } + mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].Zone = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Master[i3].Regional[i4].Location[i5].ZoneRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Master[i3].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.Master[i3].SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.Master[i3].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].SecurityGroupIds") + } + mg.Spec.ForProvider.Master[i3].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Master[i3].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Master[i3].Zonal); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Master[i3].Zonal[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Master[i3].Zonal[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.Master[i3].Zonal[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].Zonal[i4].SubnetID") + } + mg.Spec.ForProvider.Master[i3].Zonal[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Master[i3].Zonal[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Master[i3].Zonal); i4++ { + rsp, 
err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Master[i3].Zonal[i4].Zone), + Extract: resource.ExtractParamPath("zone", false), + Reference: mg.Spec.ForProvider.Master[i3].Zonal[i4].ZoneRef, + Selector: mg.Spec.ForProvider.Master[i3].Zonal[i4].ZoneSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Master[i3].Zonal[i4].Zone") + } + mg.Spec.ForProvider.Master[i3].Zonal[i4].Zone = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Master[i3].Zonal[i4].ZoneRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha12.NetworkList{}, + Managed: &v1alpha12.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NodeServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NodeServiceAccountIDRef, + Selector: mg.Spec.ForProvider.NodeServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NodeServiceAccountID") + } + mg.Spec.ForProvider.NodeServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NodeServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.KMSProvider); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSProvider[i3].KeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSProvider[i3].KeyIDRef, + Selector: mg.Spec.InitProvider.KMSProvider[i3].KeyIDSelector, + To: reference.To{ + List: &v1alpha11.SymmetricKeyList{}, + 
Managed: &v1alpha11.SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSProvider[i3].KeyID") + } + mg.Spec.InitProvider.KMSProvider[i3].KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSProvider[i3].KeyIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Master[i3].MasterLocation); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Master[i3].MasterLocation[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Master[i3].MasterLocation[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Master[i3].MasterLocation[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].MasterLocation[i4].SubnetID") + } + mg.Spec.InitProvider.Master[i3].MasterLocation[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Master[i3].MasterLocation[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Master[i3].MasterLogging); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Master[i3].MasterLogging[i4].LogGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Master[i3].MasterLogging[i4].LogGroupIDRef, + Selector: mg.Spec.InitProvider.Master[i3].MasterLogging[i4].LogGroupIDSelector, + To: reference.To{ + List: &v1alpha13.GroupList{}, + Managed: &v1alpha13.Group{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].MasterLogging[i4].LogGroupID") + } + mg.Spec.InitProvider.Master[i3].MasterLogging[i4].LogGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Master[i3].MasterLogging[i4].LogGroupIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Master[i3].Regional); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Master[i3].Regional[i4].Location); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].SubnetIDRef, + Selector: mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].SubnetID") + } + mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].SubnetIDRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Master[i3].Regional); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Master[i3].Regional[i4].Location); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].Zone), + Extract: 
resource.ExtractParamPath("zone", false), + Reference: mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].ZoneRef, + Selector: mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].ZoneSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].Zone") + } + mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].Zone = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Master[i3].Regional[i4].Location[i5].ZoneRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Master[i3].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.Master[i3].SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.Master[i3].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].SecurityGroupIds") + } + mg.Spec.InitProvider.Master[i3].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Master[i3].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Master[i3].Zonal); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Master[i3].Zonal[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Master[i3].Zonal[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Master[i3].Zonal[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].Zonal[i4].SubnetID") + } + mg.Spec.InitProvider.Master[i3].Zonal[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Master[i3].Zonal[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Master); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Master[i3].Zonal); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Master[i3].Zonal[i4].Zone), + Extract: resource.ExtractParamPath("zone", false), + Reference: mg.Spec.InitProvider.Master[i3].Zonal[i4].ZoneRef, + Selector: mg.Spec.InitProvider.Master[i3].Zonal[i4].ZoneSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Master[i3].Zonal[i4].Zone") + } + mg.Spec.InitProvider.Master[i3].Zonal[i4].Zone = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Master[i3].Zonal[i4].ZoneRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha12.NetworkList{}, + Managed: &v1alpha12.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + 
mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NodeServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NodeServiceAccountIDRef, + Selector: mg.Spec.InitProvider.NodeServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NodeServiceAccountID") + } + mg.Spec.InitProvider.NodeServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NodeServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this NodeGroup. +func (mg *NodeGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.AllocationPolicy); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.AllocationPolicy[i3].Location); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetID") + } + mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &ClusterList{}, + Managed: &Cluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: 
reference.FromPtrValues(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds") + } + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs, + Selector: mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds") + } + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.AllocationPolicy); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.AllocationPolicy[i3].Location); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetID") + } + mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AllocationPolicy[i3].Location[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &ClusterList{}, + Managed: &Cluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, 
reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha12.SecurityGroupList{}, + Managed: &v1alpha12.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds") + } + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SecurityGroupIdsRefs = mrsp.ResolvedReferences + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.InstanceTemplate); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface); i4++ { + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs, + Selector: mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsSelector, + To: reference.To{ + List: &v1alpha12.SubnetList{}, + Managed: &v1alpha12.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds") + } + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.InstanceTemplate[i3].NetworkInterface[i4].SubnetIdsRefs = mrsp.ResolvedReferences + + } + } + + return nil +} diff --git a/apis/kubernetes/v1alpha1/zz_groupversion_info.go b/apis/kubernetes/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..97893ad --- /dev/null +++ b/apis/kubernetes/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kubernetes.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kubernetes.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kubernetes/v1alpha1/zz_nodegroup_terraformed.go b/apis/kubernetes/v1alpha1/zz_nodegroup_terraformed.go new file mode 100755 index 0000000..b84fe20 --- /dev/null +++ b/apis/kubernetes/v1alpha1/zz_nodegroup_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
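// The functions in this file, together with the NodeGroup types, satisfy
// upjet's resource.Terraformed interface: they map the CRD's spec/status to
// the attributes of the "yandex_kubernetes_node_group" Terraform resource and
// back. Registration with a controller-runtime scheme goes through the sibling
// zz_groupversion_info.go above; a typical wiring sketch (illustrative, not
// part of this diff):
//
//	import kubernetesv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kubernetes/v1alpha1"
//
//	_ = kubernetesv1alpha1.AddToScheme(mgr.GetScheme())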
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this NodeGroup
+func (mg *NodeGroup) GetTerraformResourceType() string {
+	return "yandex_kubernetes_node_group"
+}
+
+// GetConnectionDetailsMapping for this NodeGroup
+func (tr *NodeGroup) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this NodeGroup
+func (tr *NodeGroup) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this NodeGroup
+func (tr *NodeGroup) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this NodeGroup
+func (tr *NodeGroup) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this NodeGroup
+func (tr *NodeGroup) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this NodeGroup
+func (tr *NodeGroup) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this NodeGroup
+func (tr *NodeGroup) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this NodeGroup
+func (tr *NodeGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
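// A sketch of how the mergo options in GetMergedParameters above combine the
// two parameter sets (mergedParametersSketch and its values are illustrative,
// not generated code):

func mergedParametersSketch() (map[string]any, error) {
	params := map[string]any{"name": "from-forProvider"}             // always wins
	initParams := map[string]any{"name": "ignored", "labels": "dev"} // fills gaps only
	// WithSliceDeepCopy deep-copies slice values out of initParams but also
	// flips Overwrite on, so the trailing option resets it: existing
	// forProvider values are kept and only missing keys ("labels") merge in.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	return params, err // => {"name": "from-forProvider", "labels": "dev"}
}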
+// LateInitialize this NodeGroup using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *NodeGroup) LateInitialize(attrs []byte) (bool, error) {
+	params := &NodeGroupParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *NodeGroup) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/kubernetes/v1alpha1/zz_nodegroup_types.go b/apis/kubernetes/v1alpha1/zz_nodegroup_types.go
new file mode 100755
index 0000000..5d8f8ad
--- /dev/null
+++ b/apis/kubernetes/v1alpha1/zz_nodegroup_types.go
@@ -0,0 +1,1092 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type AllocationPolicyInitParameters struct {
+
+	// Repeated field that specifies the subnets (zones) that will be used by the node group's compute instances. The structure is documented below.
+	Location []AllocationPolicyLocationInitParameters `json:"location,omitempty" tf:"location,omitempty"`
+}
+
+type AllocationPolicyLocationInitParameters struct {
+
+	// ID of the subnet that will be used by one compute instance in the node group.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+	// ID of the availability zone for one compute instance in the node group.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type AllocationPolicyLocationObservation struct {
+
+	// ID of the subnet that will be used by one compute instance in the node group.
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// ID of the availability zone for one compute instance in the node group.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
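// Each schema block in this file is generated in three variants: the
// *InitParameters types back spec.initProvider (initial values that
// late-initialization may fill in), the *Parameters types back
// spec.forProvider (the desired state, carrying kubebuilder markers), and the
// *Observation types back status.atProvider (what Terraform reports). A sketch
// of populating the desired state in Go (values are illustrative):

func allocationPolicySketch() AllocationPolicyParameters {
	zone := "ru-central1-a"
	return AllocationPolicyParameters{
		Location: []AllocationPolicyLocationParameters{{
			// SubnetID stays nil here; the generated resolver fills it from
			// the named Subnet before the Terraform call.
			SubnetIDRef: &v1.Reference{Name: "my-subnet"},
			Zone:        &zone,
		}},
	}
}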
+type AllocationPolicyLocationParameters struct {
+
+	// ID of the subnet that will be used by one compute instance in the node group.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+	// ID of the availability zone for one compute instance in the node group.
+	// +kubebuilder:validation:Optional
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type AllocationPolicyObservation struct {
+
+	// Repeated field that specifies the subnets (zones) that will be used by the node group's compute instances. The structure is documented below.
+	Location []AllocationPolicyLocationObservation `json:"location,omitempty" tf:"location,omitempty"`
+}
+
+type AllocationPolicyParameters struct {
+
+	// Repeated field that specifies the subnets (zones) that will be used by the node group's compute instances. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Location []AllocationPolicyLocationParameters `json:"location,omitempty" tf:"location,omitempty"`
+}
+
+type AutoScaleInitParameters struct {
+
+	// Initial number of instances in the node group.
+	Initial *float64 `json:"initial,omitempty" tf:"initial,omitempty"`
+
+	// Maximum number of instances in the node group.
+	Max *float64 `json:"max,omitempty" tf:"max,omitempty"`
+
+	// Minimum number of instances in the node group.
+	Min *float64 `json:"min,omitempty" tf:"min,omitempty"`
+}
+
+type AutoScaleObservation struct {
+
+	// Initial number of instances in the node group.
+	Initial *float64 `json:"initial,omitempty" tf:"initial,omitempty"`
+
+	// Maximum number of instances in the node group.
+	Max *float64 `json:"max,omitempty" tf:"max,omitempty"`
+
+	// Minimum number of instances in the node group.
+	Min *float64 `json:"min,omitempty" tf:"min,omitempty"`
+}
+
+type AutoScaleParameters struct {
+
+	// Initial number of instances in the node group.
+	// +kubebuilder:validation:Optional
+	Initial *float64 `json:"initial" tf:"initial,omitempty"`
+
+	// Maximum number of instances in the node group.
+	// +kubebuilder:validation:Optional
+	Max *float64 `json:"max" tf:"max,omitempty"`
+
+	// Minimum number of instances in the node group.
+	// +kubebuilder:validation:Optional
+	Min *float64 `json:"min" tf:"min,omitempty"`
+}
+
+type BootDiskInitParameters struct {
+
+	// The size of the boot disk, specified in GB.
+	Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+	// The boot disk type.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type BootDiskObservation struct {
+
+	// The size of the boot disk, specified in GB.
+	Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+	// The boot disk type.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type BootDiskParameters struct {
+
+	// The size of the boot disk, specified in GB.
+	// +kubebuilder:validation:Optional
+	Size *float64 `json:"size,omitempty" tf:"size,omitempty"`
+
+	// The boot disk type.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type ContainerNetworkInitParameters struct {
+
+	// MTU for pods.
+	PodMtu *float64 `json:"podMtu,omitempty" tf:"pod_mtu,omitempty"`
+}
+
+type ContainerNetworkObservation struct {
+
+	// MTU for pods.
+	PodMtu *float64 `json:"podMtu,omitempty" tf:"pod_mtu,omitempty"`
+}
+
+type ContainerNetworkParameters struct {
+
+	// MTU for pods.
+	// +kubebuilder:validation:Optional
+	PodMtu *float64 `json:"podMtu,omitempty" tf:"pod_mtu,omitempty"`
+}
+
+type ContainerRuntimeInitParameters struct {
+
+	// Type of container runtime. Values: docker, containerd.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type ContainerRuntimeObservation struct {
+
+	// Type of container runtime.
Values: docker, containerd. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ContainerRuntimeParameters struct { + + // Type of container runtime. Values: docker, containerd. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type DeployPolicyInitParameters struct { + + // The maximum number of instances that can be temporarily allocated above the group's target size during the update. + MaxExpansion *float64 `json:"maxExpansion,omitempty" tf:"max_expansion,omitempty"` + + // The maximum number of running instances that can be taken offline during update. + MaxUnavailable *float64 `json:"maxUnavailable,omitempty" tf:"max_unavailable,omitempty"` +} + +type DeployPolicyObservation struct { + + // The maximum number of instances that can be temporarily allocated above the group's target size during the update. + MaxExpansion *float64 `json:"maxExpansion,omitempty" tf:"max_expansion,omitempty"` + + // The maximum number of running instances that can be taken offline during update. + MaxUnavailable *float64 `json:"maxUnavailable,omitempty" tf:"max_unavailable,omitempty"` +} + +type DeployPolicyParameters struct { + + // The maximum number of instances that can be temporarily allocated above the group's target size during the update. + // +kubebuilder:validation:Optional + MaxExpansion *float64 `json:"maxExpansion" tf:"max_expansion,omitempty"` + + // The maximum number of running instances that can be taken offline during update. + // +kubebuilder:validation:Optional + MaxUnavailable *float64 `json:"maxUnavailable" tf:"max_unavailable,omitempty"` +} + +type FixedScaleInitParameters struct { + + // The number of instances in the node group. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type FixedScaleObservation struct { + + // The number of instances in the node group. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type FixedScaleParameters struct { + + // The number of instances in the node group. + // +kubebuilder:validation:Optional + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type GpuSettingsInitParameters struct { + + // GPU cluster id. + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // GPU environment. Values: runc, runc_drivers_cuda. + GpuEnvironment *string `json:"gpuEnvironment,omitempty" tf:"gpu_environment,omitempty"` +} + +type GpuSettingsObservation struct { + + // GPU cluster id. + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // GPU environment. Values: runc, runc_drivers_cuda. + GpuEnvironment *string `json:"gpuEnvironment,omitempty" tf:"gpu_environment,omitempty"` +} + +type GpuSettingsParameters struct { + + // GPU cluster id. + // +kubebuilder:validation:Optional + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // GPU environment. Values: runc, runc_drivers_cuda. + // +kubebuilder:validation:Optional + GpuEnvironment *string `json:"gpuEnvironment,omitempty" tf:"gpu_environment,omitempty"` +} + +type IPv4DNSRecordsInitParameters struct { + + // DNS zone ID (if not set, private zone is used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL (in seconds). 
+ TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv4DNSRecordsObservation struct { + + // DNS zone ID (if not set, private zone is used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL (in seconds). + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv4DNSRecordsParameters struct { + + // DNS zone ID (if not set, private zone is used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN. + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL (in seconds). + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv6DNSRecordsInitParameters struct { + + // DNS zone ID (if not set, private zone is used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL (in seconds). + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv6DNSRecordsObservation struct { + + // DNS zone ID (if not set, private zone is used). + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL (in seconds). + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type IPv6DNSRecordsParameters struct { + + // DNS zone ID (if not set, private zone is used). + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // DNS record FQDN. + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` + + // When set to true, also create a PTR DNS record. + // +kubebuilder:validation:Optional + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // DNS record TTL (in seconds). + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type InstanceTemplateInitParameters struct { + + // The specifications for boot disks that will be attached to the instance. The structure is documented below. + BootDisk []BootDiskInitParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"` + + // Container network configuration. The structure is documented below. + ContainerNetwork []ContainerNetworkInitParameters `json:"containerNetwork,omitempty" tf:"container_network,omitempty"` + + // Container runtime configuration. The structure is documented below. + ContainerRuntime []ContainerRuntimeInitParameters `json:"containerRuntime,omitempty" tf:"container_runtime,omitempty"` + + // GPU settings. The structure is documented below. + GpuSettings []GpuSettingsInitParameters `json:"gpuSettings,omitempty" tf:"gpu_settings,omitempty"` + + // Labels that will be assigned to compute nodes (instances), created by the Node Group. 
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The set of metadata key:value pairs assigned to this instance template. This includes custom metadata and predefined keys. Note: the key "user-data" won't be provided to instances; it is reserved for internal use by the kubernetes_node_group resource.
+	// +mapType=granular
+	Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+	// Boolean flag that enables NAT for node group compute instances.
+	NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+	// Name template of the instance. In order to be unique it must contain at least one of the instance-unique placeholders:
+	// {instance.short_id}
+	// {instance.index}
+	// combination of {instance.zone_id} and {instance.index_in_zone}
+	// Example: my-instance-{instance.index}
+	// If not set, the default is used: {instance_group.id}-{instance.short_id}
+	// It may also contain other placeholders; see the Compute Instance group metadata docs for the full list.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Type of network acceleration. Values: standard, software_accelerated.
+	NetworkAccelerationType *string `json:"networkAccelerationType,omitempty" tf:"network_acceleration_type,omitempty"`
+
+	// An array with the network interfaces that will be attached to the instance. The structure is documented below.
+	NetworkInterface []NetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+	// The placement policy configuration. The structure is documented below.
+	PlacementPolicy []PlacementPolicyInitParameters `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+	// The ID of the hardware platform configuration for the node group compute instances.
+	PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+	Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+	// The scheduling policy for the instances in the node group. The structure is documented below.
+	SchedulingPolicy []SchedulingPolicyInitParameters `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+}
+
+type InstanceTemplateObservation struct {
+
+	// The specifications for boot disks that will be attached to the instance. The structure is documented below.
+	BootDisk []BootDiskObservation `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+	// Container network configuration. The structure is documented below.
+	ContainerNetwork []ContainerNetworkObservation `json:"containerNetwork,omitempty" tf:"container_network,omitempty"`
+
+	// Container runtime configuration. The structure is documented below.
+	ContainerRuntime []ContainerRuntimeObservation `json:"containerRuntime,omitempty" tf:"container_runtime,omitempty"`
+
+	// GPU settings. The structure is documented below.
+	GpuSettings []GpuSettingsObservation `json:"gpuSettings,omitempty" tf:"gpu_settings,omitempty"`
+
+	// Labels that will be assigned to compute nodes (instances), created by the Node Group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The set of metadata key:value pairs assigned to this instance template. This includes custom metadata and predefined keys. Note: the key "user-data" won't be provided to instances; it is reserved for internal use by the kubernetes_node_group resource.
+	// +mapType=granular
+	Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+	// Boolean flag that enables NAT for node group compute instances.
+	NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+	// Name template of the instance. In order to be unique it must contain at least one of the instance-unique placeholders:
+	// {instance.short_id}
+	// {instance.index}
+	// combination of {instance.zone_id} and {instance.index_in_zone}
+	// Example: my-instance-{instance.index}
+	// If not set, the default is used: {instance_group.id}-{instance.short_id}
+	// It may also contain other placeholders; see the Compute Instance group metadata docs for the full list.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Type of network acceleration. Values: standard, software_accelerated.
+	NetworkAccelerationType *string `json:"networkAccelerationType,omitempty" tf:"network_acceleration_type,omitempty"`
+
+	// An array with the network interfaces that will be attached to the instance. The structure is documented below.
+	NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+	// The placement policy configuration. The structure is documented below.
+	PlacementPolicy []PlacementPolicyObservation `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+	// The ID of the hardware platform configuration for the node group compute instances.
+	PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+	Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+
+	// The scheduling policy for the instances in the node group. The structure is documented below.
+	SchedulingPolicy []SchedulingPolicyObservation `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+}
+
+type InstanceTemplateParameters struct {
+
+	// The specifications for boot disks that will be attached to the instance. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	BootDisk []BootDiskParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"`
+
+	// Container network configuration. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ContainerNetwork []ContainerNetworkParameters `json:"containerNetwork,omitempty" tf:"container_network,omitempty"`
+
+	// Container runtime configuration. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ContainerRuntime []ContainerRuntimeParameters `json:"containerRuntime,omitempty" tf:"container_runtime,omitempty"`
+
+	// GPU settings. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	GpuSettings []GpuSettingsParameters `json:"gpuSettings,omitempty" tf:"gpu_settings,omitempty"`
+
+	// Labels that will be assigned to compute nodes (instances), created by the Node Group.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// The set of metadata key:value pairs assigned to this instance template. This includes custom metadata and predefined keys. Note: the key "user-data" won't be provided to instances; it is reserved for internal use by the kubernetes_node_group resource.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+	// Boolean flag that enables NAT for node group compute instances.
+	// +kubebuilder:validation:Optional
+	NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+	// Name template of the instance. In order to be unique it must contain at least one of the instance-unique placeholders:
+	// {instance.short_id}
+	// {instance.index}
+	// combination of {instance.zone_id} and {instance.index_in_zone}
+	// Example: my-instance-{instance.index}
+	// If not set, the default is used: {instance_group.id}-{instance.short_id}
+	// It may also contain other placeholders; see the Compute Instance group metadata docs for the full list.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Type of network acceleration. Values: standard, software_accelerated.
+	// +kubebuilder:validation:Optional
+	NetworkAccelerationType *string `json:"networkAccelerationType,omitempty" tf:"network_acceleration_type,omitempty"`
+
+	// An array with the network interfaces that will be attached to the instance. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	NetworkInterface []NetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+	// The placement policy configuration. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	PlacementPolicy []PlacementPolicyParameters `json:"placementPolicy,omitempty" tf:"placement_policy,omitempty"`
+
+	// The ID of the hardware platform configuration for the node group compute instances.
+	// +kubebuilder:validation:Optional
+	PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	Resources []ResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+	// The scheduling policy for the instances in the node group. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	SchedulingPolicy []SchedulingPolicyParameters `json:"schedulingPolicy,omitempty" tf:"scheduling_policy,omitempty"`
+}
+
+type MaintenancePolicyMaintenanceWindowInitParameters struct {
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
+
+	Duration *string `json:"duration,omitempty" tf:"duration,omitempty"`
+
+	StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"`
+}
+
+type MaintenancePolicyMaintenanceWindowObservation struct {
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
+
+	Duration *string `json:"duration,omitempty" tf:"duration,omitempty"`
+
+	StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"`
+}
+
+type MaintenancePolicyMaintenanceWindowParameters struct {
+
+	// +kubebuilder:validation:Optional
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	Duration *string `json:"duration" tf:"duration,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	StartTime *string `json:"startTime" tf:"start_time,omitempty"`
+}
+
+type NetworkInterfaceInitParameters struct {
+
+	// Allocate an IPv4 address for the interface. The default value is true.
+	IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+	// List of configurations for creating ipv4 DNS records. The structure is documented below.
+	IPv4DNSRecords []IPv4DNSRecordsInitParameters `json:"ipv4DnsRecords,omitempty" tf:"ipv4_dns_records,omitempty"`
+
+	// If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+	IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+	// List of configurations for creating ipv6 DNS records.
+
+type NetworkInterfaceInitParameters struct {
+
+	// Allocate an IPv4 address for the interface. The default value is true.
+	IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+	// List of configurations for creating IPv4 DNS records. The structure is documented below.
+	IPv4DNSRecords []IPv4DNSRecordsInitParameters `json:"ipv4DnsRecords,omitempty" tf:"ipv4_dns_records,omitempty"`
+
+	// If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+	IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+	// List of configurations for creating IPv6 DNS records. The structure is documented below.
+	IPv6DNSRecords []IPv6DNSRecordsInitParameters `json:"ipv6DnsRecords,omitempty" tf:"ipv6_dns_records,omitempty"`
+
+	// A public address that can be used to access the internet over NAT.
+	NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+	// Security group IDs for the network interface.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+	// The IDs of the subnets.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +listType=set
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+
+	// References to Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
+}
+
+type NetworkInterfaceObservation struct {
+
+	// Allocate an IPv4 address for the interface. The default value is true.
+	IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+	// List of configurations for creating IPv4 DNS records. The structure is documented below.
+	IPv4DNSRecords []IPv4DNSRecordsObservation `json:"ipv4DnsRecords,omitempty" tf:"ipv4_dns_records,omitempty"`
+
+	// If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+	IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+	// List of configurations for creating IPv6 DNS records. The structure is documented below.
+	IPv6DNSRecords []IPv6DNSRecordsObservation `json:"ipv6DnsRecords,omitempty" tf:"ipv6_dns_records,omitempty"`
+
+	// A public address that can be used to access the internet over NAT.
+	NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+	// Security group IDs for the network interface.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// The IDs of the subnets.
+	// +listType=set
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+}
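The Refs/Selector fields above are resolved by crossplane-runtime's reference resolver before the Terraform call, so subnets and security groups can be wired up by label instead of hard-coded IDs. A hypothetical in-package sketch (the label key and value are invented; only the field names come from the structs above):

```go
package v1alpha1

import v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

// exampleNIC sketches a NAT-enabled interface whose subnets and security
// groups are selected by label; the reference resolver fills SubnetIds and
// SecurityGroupIds from matching vpc managed resources.
func exampleNIC() NetworkInterfaceInitParameters {
	ipv4, nat := true, true
	return NetworkInterfaceInitParameters{
		IPv4:                     &ipv4,
		NAT:                      &nat,
		SubnetIdsSelector:        &v1.Selector{MatchLabels: map[string]string{"cluster": "demo"}},
		SecurityGroupIdsSelector: &v1.Selector{MatchLabels: map[string]string{"cluster": "demo"}},
	}
}
```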
+
+type NetworkInterfaceParameters struct {
+
+	// Allocate an IPv4 address for the interface. The default value is true.
+	// +kubebuilder:validation:Optional
+	IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"`
+
+	// List of configurations for creating IPv4 DNS records. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	IPv4DNSRecords []IPv4DNSRecordsParameters `json:"ipv4DnsRecords,omitempty" tf:"ipv4_dns_records,omitempty"`
+
+	// If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
+	// +kubebuilder:validation:Optional
+	IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"`
+
+	// List of configurations for creating IPv6 DNS records. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	IPv6DNSRecords []IPv6DNSRecordsParameters `json:"ipv6DnsRecords,omitempty" tf:"ipv6_dns_records,omitempty"`
+
+	// A public address that can be used to access the internet over NAT.
+	// +kubebuilder:validation:Optional
+	NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"`
+
+	// Security group IDs for the network interface.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+	// The IDs of the subnets.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+
+	// References to Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
+}
+
+type NodeGroupInitParameters struct {
+
+	// This argument specifies the subnets (zones) that will be used by node group compute instances. The structure is documented below.
+	AllocationPolicy []AllocationPolicyInitParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"`
+
+	// A list of allowed unsafe sysctl parameters for this node group. For more details, see the documentation.
+	AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"`
+
+	// The ID of the Kubernetes cluster that this node group belongs to.
+	// +crossplane:generate:reference:type=Cluster
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+
+	// Reference to a Cluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+
+	// Selector for a Cluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+
+	// Deploy policy of the node group. The structure is documented below.
+	DeployPolicy []DeployPolicyInitParameters `json:"deployPolicy,omitempty" tf:"deploy_policy,omitempty"`
+
+	// A description of the Kubernetes node group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Template used to create compute instances in this Kubernetes node group. The structure is documented below.
+	InstanceTemplate []InstanceTemplateInitParameters `json:"instanceTemplate,omitempty" tf:"instance_template,omitempty"`
+
+	// A set of key/value label pairs assigned to the Kubernetes node group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// (Computed) Maintenance policy for this Kubernetes node group. If policy is omitted, automatic revision upgrades are enabled and could happen at any time. Revision upgrades are performed only within the same minor version, e.g. 1.13. Minor version upgrades (e.g. 1.13->1.14) should be performed manually. The structure is documented below.
+	MaintenancePolicy []NodeGroupMaintenancePolicyInitParameters `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+	// Name of a specific Kubernetes node group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// A set of key/value label pairs that are assigned to all the nodes of this Kubernetes node group.
+	// +mapType=granular
+	NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"`
+
+	// A list of Kubernetes taints that are applied to all the nodes of this Kubernetes node group.
+	NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"`
+
+	// Scale policy of the node group. The structure is documented below.
+	ScalePolicy []ScalePolicyInitParameters `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"`
+
+	// Version of Kubernetes that will be used for the Kubernetes node group.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type NodeGroupMaintenancePolicyInitParameters struct {
+
+	// Boolean flag that specifies if the node group can be repaired automatically. When omitted, the default value is true.
+	AutoRepair *bool `json:"autoRepair,omitempty" tf:"auto_repair,omitempty"`
+
+	// Boolean flag that specifies if the node group can be upgraded automatically. When omitted, the default value is true.
+	AutoUpgrade *bool `json:"autoUpgrade,omitempty" tf:"auto_upgrade,omitempty"`
+
+	// (Computed) Set of day intervals when maintenance is allowed for this node group. When omitted, it defaults to any time.
+	MaintenanceWindow []MaintenancePolicyMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+}
+
+type NodeGroupMaintenancePolicyObservation struct {
+
+	// Boolean flag that specifies if the node group can be repaired automatically. When omitted, the default value is true.
+	AutoRepair *bool `json:"autoRepair,omitempty" tf:"auto_repair,omitempty"`
+
+	// Boolean flag that specifies if the node group can be upgraded automatically. When omitted, the default value is true.
+	AutoUpgrade *bool `json:"autoUpgrade,omitempty" tf:"auto_upgrade,omitempty"`
+
+	// (Computed) Set of day intervals when maintenance is allowed for this node group. When omitted, it defaults to any time.
+	MaintenanceWindow []MaintenancePolicyMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+}
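A hypothetical in-package sketch of a policy that keeps auto-repair and auto-upgrade on but confines maintenance to a weekly window. The "monday"/"03:00"/"4h" values are placeholders whose exact formats should be verified against the Yandex provider docs; only the field names come from the structs above:

```go
package v1alpha1

// exampleMaintenancePolicy confines maintenance to a Monday-morning window
// while leaving automatic repair and upgrades enabled.
func exampleMaintenancePolicy() NodeGroupMaintenancePolicyInitParameters {
	autoRepair, autoUpgrade := true, true
	day, start, duration := "monday", "03:00", "4h"
	return NodeGroupMaintenancePolicyInitParameters{
		AutoRepair:  &autoRepair,
		AutoUpgrade: &autoUpgrade,
		MaintenanceWindow: []MaintenancePolicyMaintenanceWindowInitParameters{{
			Day:       &day,
			StartTime: &start,
			Duration:  &duration,
		}},
	}
}
```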
+
+type NodeGroupMaintenancePolicyParameters struct {
+
+	// Boolean flag that specifies if the node group can be repaired automatically. When omitted, the default value is true.
+	// +kubebuilder:validation:Optional
+	AutoRepair *bool `json:"autoRepair" tf:"auto_repair,omitempty"`
+
+	// Boolean flag that specifies if the node group can be upgraded automatically. When omitted, the default value is true.
+	// +kubebuilder:validation:Optional
+	AutoUpgrade *bool `json:"autoUpgrade" tf:"auto_upgrade,omitempty"`
+
+	// (Computed) Set of day intervals when maintenance is allowed for this node group. When omitted, it defaults to any time.
+	// +kubebuilder:validation:Optional
+	MaintenanceWindow []MaintenancePolicyMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+}
+
+type NodeGroupObservation struct {
+
+	// This argument specifies the subnets (zones) that will be used by node group compute instances. The structure is documented below.
+	AllocationPolicy []AllocationPolicyObservation `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"`
+
+	// A list of allowed unsafe sysctl parameters for this node group. For more details, see the documentation.
+	AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"`
+
+	// The ID of the Kubernetes cluster that this node group belongs to.
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+
+	// (Computed) The Kubernetes node group creation timestamp.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// Deploy policy of the node group. The structure is documented below.
+	DeployPolicy []DeployPolicyObservation `json:"deployPolicy,omitempty" tf:"deploy_policy,omitempty"`
+
+	// A description of the Kubernetes node group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// (Computed) ID of the instance group that is used to manage this Kubernetes node group.
+	InstanceGroupID *string `json:"instanceGroupId,omitempty" tf:"instance_group_id,omitempty"`
+
+	// Template used to create compute instances in this Kubernetes node group. The structure is documented below.
+	InstanceTemplate []InstanceTemplateObservation `json:"instanceTemplate,omitempty" tf:"instance_template,omitempty"`
+
+	// A set of key/value label pairs assigned to the Kubernetes node group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// (Computed) Maintenance policy for this Kubernetes node group. If policy is omitted, automatic revision upgrades are enabled and could happen at any time. Revision upgrades are performed only within the same minor version, e.g. 1.13. Minor version upgrades (e.g. 1.13->1.14) should be performed manually. The structure is documented below.
+	MaintenancePolicy []NodeGroupMaintenancePolicyObservation `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+	// Name of a specific Kubernetes node group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// A set of key/value label pairs that are assigned to all the nodes of this Kubernetes node group.
+	// +mapType=granular
+	NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"`
+
+	// A list of Kubernetes taints that are applied to all the nodes of this Kubernetes node group.
+	NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"`
+
+	// Scale policy of the node group. The structure is documented below.
+	ScalePolicy []ScalePolicyObservation `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"`
+
+	// (Computed) Status of the Kubernetes node group.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+	// Version of Kubernetes that will be used for the Kubernetes node group.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+
+	// (Computed) Information about the Kubernetes node group version. The structure is documented below.
+	VersionInfo []NodeGroupVersionInfoObservation `json:"versionInfo,omitempty" tf:"version_info,omitempty"`
+}
+
+type NodeGroupParameters struct {
+
+	// This argument specifies the subnets (zones) that will be used by node group compute instances. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	AllocationPolicy []AllocationPolicyParameters `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"`
+
+	// A list of allowed unsafe sysctl parameters for this node group. For more details, see the documentation.
+	// +kubebuilder:validation:Optional
+	AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"`
+
+	// The ID of the Kubernetes cluster that this node group belongs to.
+	// +crossplane:generate:reference:type=Cluster
+	// +kubebuilder:validation:Optional
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+
+	// Reference to a Cluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+
+	// Selector for a Cluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+
+	// Deploy policy of the node group. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	DeployPolicy []DeployPolicyParameters `json:"deployPolicy,omitempty" tf:"deploy_policy,omitempty"`
+
+	// A description of the Kubernetes node group.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Template used to create compute instances in this Kubernetes node group. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	InstanceTemplate []InstanceTemplateParameters `json:"instanceTemplate,omitempty" tf:"instance_template,omitempty"`
+
+	// A set of key/value label pairs assigned to the Kubernetes node group.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// (Computed) Maintenance policy for this Kubernetes node group. If policy is omitted, automatic revision upgrades are enabled and could happen at any time. Revision upgrades are performed only within the same minor version, e.g. 1.13. Minor version upgrades (e.g. 1.13->1.14) should be performed manually. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	MaintenancePolicy []NodeGroupMaintenancePolicyParameters `json:"maintenancePolicy,omitempty" tf:"maintenance_policy,omitempty"`
+
+	// Name of a specific Kubernetes node group.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// A set of key/value label pairs that are assigned to all the nodes of this Kubernetes node group.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"`
+
+	// A list of Kubernetes taints that are applied to all the nodes of this Kubernetes node group.
+	// +kubebuilder:validation:Optional
+	NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"`
+
+	// Scale policy of the node group. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ScalePolicy []ScalePolicyParameters `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"`
+
+	// Version of Kubernetes that will be used for the Kubernetes node group.
+	// +kubebuilder:validation:Optional
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
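As a hypothetical sketch of how these parameters come together in a managed resource: cluster_id is populated through ClusterIDRef rather than a raw ID, while the instanceTemplate and scalePolicy blocks required by the CEL validation rules further below are left to the caller. The resource name, reference name, and version value are all invented:

```go
package v1alpha1

import v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

// exampleNodeGroup builds a minimal NodeGroup whose cluster_id is resolved
// from a Cluster managed resource named "my-cluster".
func exampleNodeGroup(name string) *NodeGroup {
	ng := &NodeGroup{}
	ng.SetName(name)
	version := "1.30" // placeholder Kubernetes version
	ng.Spec.ForProvider = NodeGroupParameters{
		ClusterIDRef: &v1.Reference{Name: "my-cluster"},
		Version:      &version,
	}
	return ng
}
```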
+
+type NodeGroupVersionInfoInitParameters struct {
+}
+
+type NodeGroupVersionInfoObservation struct {
+
+	// Current Kubernetes version, major.minor (e.g. 1.15).
+	CurrentVersion *string `json:"currentVersion,omitempty" tf:"current_version,omitempty"`
+
+	// True/false flag. Newer revisions may include Kubernetes patches (e.g. 1.15.1 -> 1.15.2) as well as some internal component updates - new features or bug fixes in Yandex-specific components either on the master or nodes.
+	NewRevisionAvailable *bool `json:"newRevisionAvailable,omitempty" tf:"new_revision_available,omitempty"`
+
+	// Human-readable description of the changes to be applied when updating to the latest revision. Empty if new_revision_available is false.
+	NewRevisionSummary *string `json:"newRevisionSummary,omitempty" tf:"new_revision_summary,omitempty"`
+
+	// True/false flag. The current version is on the deprecation schedule; the component (master or node group) should be upgraded.
+	VersionDeprecated *bool `json:"versionDeprecated,omitempty" tf:"version_deprecated,omitempty"`
+}
+
+type NodeGroupVersionInfoParameters struct {
+}
+
+type PlacementPolicyInitParameters struct {
+
+	// Specifies the ID of the Placement Group to assign to the instances.
+	PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"`
+}
+
+type PlacementPolicyObservation struct {
+
+	// Specifies the ID of the Placement Group to assign to the instances.
+	PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"`
+}
+
+type PlacementPolicyParameters struct {
+
+	// Specifies the ID of the Placement Group to assign to the instances.
+	// +kubebuilder:validation:Optional
+	PlacementGroupID *string `json:"placementGroupId" tf:"placement_group_id,omitempty"`
+}
+
+type ResourcesInitParameters struct {
+
+	// Baseline core performance as a percent.
+	CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"`
+
+	// Number of CPU cores allocated to the instance.
+	Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"`
+
+	// Number of GPU cores allocated to the instance.
+	Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"`
+
+	// The memory size allocated to the instance.
+	Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"`
+}
+
+type ResourcesObservation struct {
+
+	// Baseline core performance as a percent.
+	CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"`
+
+	// Number of CPU cores allocated to the instance.
+	Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"`
+
+	// Number of GPU cores allocated to the instance.
+	Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"`
+
+	// The memory size allocated to the instance.
+	Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"`
+}
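A hypothetical in-package sketch of a resources block asking for 2 vCPUs at a 50% baseline core fraction. In the underlying Terraform provider the memory size is, to my understanding, expressed in gigabytes; treat the units and the values as assumptions to verify, only the field names come from the structs above:

```go
package v1alpha1

// exampleResources sketches a small node shape: 2 cores, 50% core fraction,
// 4 (assumed GB) of memory.
func exampleResources() ResourcesInitParameters {
	cores, fraction, memory := 2.0, 50.0, 4.0
	return ResourcesInitParameters{
		Cores:        &cores,
		CoreFraction: &fraction,
		Memory:       &memory,
	}
}
```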
+
+type ResourcesParameters struct {
+
+	// Baseline core performance as a percent.
+	// +kubebuilder:validation:Optional
+	CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"`
+
+	// Number of CPU cores allocated to the instance.
+	// +kubebuilder:validation:Optional
+	Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"`
+
+	// Number of GPU cores allocated to the instance.
+	// +kubebuilder:validation:Optional
+	Gpus *float64 `json:"gpus,omitempty" tf:"gpus,omitempty"`
+
+	// The memory size allocated to the instance.
+	// +kubebuilder:validation:Optional
+	Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"`
+}
+
+type ScalePolicyInitParameters struct {
+
+	// Scale policy for an autoscaled node group. The structure is documented below.
+	AutoScale []AutoScaleInitParameters `json:"autoScale,omitempty" tf:"auto_scale,omitempty"`
+
+	// Scale policy for a fixed scale node group. The structure is documented below.
+	FixedScale []FixedScaleInitParameters `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"`
+}
+
+type ScalePolicyObservation struct {
+
+	// Scale policy for an autoscaled node group. The structure is documented below.
+	AutoScale []AutoScaleObservation `json:"autoScale,omitempty" tf:"auto_scale,omitempty"`
+
+	// Scale policy for a fixed scale node group. The structure is documented below.
+	FixedScale []FixedScaleObservation `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"`
+}
+
+type ScalePolicyParameters struct {
+
+	// Scale policy for an autoscaled node group. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	AutoScale []AutoScaleParameters `json:"autoScale,omitempty" tf:"auto_scale,omitempty"`
+
+	// Scale policy for a fixed scale node group. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	FixedScale []FixedScaleParameters `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"`
+}
+
+type SchedulingPolicyInitParameters struct {
+
+	// Specifies if the instance is preemptible. Defaults to false.
+	Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type SchedulingPolicyObservation struct {
+
+	// Specifies if the instance is preemptible. Defaults to false.
+	Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
+
+type SchedulingPolicyParameters struct {
+
+	// Specifies if the instance is preemptible. Defaults to false.
+	// +kubebuilder:validation:Optional
+	Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"`
+}
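auto_scale and fixed_scale are the two mutually exclusive ways to size the group, so exactly one of them should be set. A hypothetical sketch of both shapes; AutoScaleInitParameters and FixedScaleInitParameters are defined earlier in this file, not in this hunk, and their field names (Size; Min, Max, Initial) are assumed from the Terraform schema's size/min/max/initial attributes:

```go
package v1alpha1

// exampleScalePolicies builds one fixed-size policy and one autoscaler-range
// policy; a real node group uses only one of the two blocks.
func exampleScalePolicies() (fixed, auto ScalePolicyInitParameters) {
	size := 3.0
	minSize, maxSize, initial := 1.0, 10.0, 3.0
	fixed = ScalePolicyInitParameters{
		FixedScale: []FixedScaleInitParameters{{Size: &size}},
	}
	auto = ScalePolicyInitParameters{
		AutoScale: []AutoScaleInitParameters{{Min: &minSize, Max: &maxSize, Initial: &initial}},
	}
	return fixed, auto
}
```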
+
+// NodeGroupSpec defines the desired state of NodeGroup
+type NodeGroupSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     NodeGroupParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller, such as an autoscaler, is managing them.
+	InitProvider NodeGroupInitParameters `json:"initProvider,omitempty"`
+}
+
+// NodeGroupStatus defines the observed state of NodeGroup.
+type NodeGroupStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        NodeGroupObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// NodeGroup is the Schema for the NodeGroups API. Allows management of Yandex Kubernetes Node Group. For more information, see
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type NodeGroup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceTemplate) || (has(self.initProvider) && has(self.initProvider.instanceTemplate))",message="spec.forProvider.instanceTemplate is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scalePolicy) || (has(self.initProvider) && has(self.initProvider.scalePolicy))",message="spec.forProvider.scalePolicy is a required parameter"
+	Spec   NodeGroupSpec   `json:"spec"`
+	Status NodeGroupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// NodeGroupList contains a list of NodeGroups
+type NodeGroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NodeGroup `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	NodeGroup_Kind             = "NodeGroup"
+	NodeGroup_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: NodeGroup_Kind}.String()
+	NodeGroup_KindAPIVersion   = NodeGroup_Kind + "." + CRDGroupVersion.String()
+	NodeGroup_GroupVersionKind = CRDGroupVersion.WithKind(NodeGroup_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&NodeGroup{}, &NodeGroupList{})
+}
diff --git a/apis/lb/v1alpha1/zz_generated.conversion_hubs.go b/apis/lb/v1alpha1/zz_generated.conversion_hubs.go
index 21ccf2d..d2bf9ed 100755
--- a/apis/lb/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/lb/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,13 +1,9 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
+// Hub marks this type as a conversion hub.
+func (tr *NetworkLoadBalancer) Hub() {}
- // Hub marks this type as a conversion hub.
- func (tr *NetworkLoadBalancer) Hub() {}
-
- // Hub marks this type as a conversion hub.
- func (tr *TargetGroup) Hub() {}
-
+// Hub marks this type as a conversion hub.
+func (tr *TargetGroup) Hub() {}
diff --git a/apis/lb/v1alpha1/zz_generated.deepcopy.go b/apis/lb/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..19dbdb0
--- /dev/null
+++ b/apis/lb/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1482 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
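Every function in the generated file that follows is mechanical: nil-check each pointer or slice field, allocate fresh storage, and copy values so the copy shares nothing with the source. A hand-written sketch of the same idiom for a hypothetical two-field struct (not a type from this provider) makes the pattern explicit:

```go
package main

import "fmt"

// Endpoint is a hypothetical stand-in for the generated API structs: pointer
// fields make "unset" representable, so a correct DeepCopyInto must allocate
// new pointers instead of sharing them with the source.
type Endpoint struct {
	Address *string
	Port    *float64
}

// DeepCopyInto mirrors the controller-gen idiom: copy the shallow value, then
// re-point every non-nil pointer field at a fresh allocation.
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
	*out = *in
	if in.Address != nil {
		out.Address = new(string)
		*out.Address = *in.Address
	}
	if in.Port != nil {
		out.Port = new(float64)
		*out.Port = *in.Port
	}
}

func main() {
	addr, port := "10.0.0.1", 443.0
	src := Endpoint{Address: &addr, Port: &port}
	var dst Endpoint
	src.DeepCopyInto(&dst)
	*dst.Address = "10.0.0.2" // mutating the copy must not touch the source
	fmt.Println(*src.Address, *dst.Address) // 10.0.0.1 10.0.0.2
}
```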
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AttachedTargetGroupInitParameters) DeepCopyInto(out *AttachedTargetGroupInitParameters) {
+	*out = *in
+	if in.Healthcheck != nil {
+		in, out := &in.Healthcheck, &out.Healthcheck
+		*out = make([]HealthcheckInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TargetGroupID != nil {
+		in, out := &in.TargetGroupID, &out.TargetGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetGroupIDRef != nil {
+		in, out := &in.TargetGroupIDRef, &out.TargetGroupIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetGroupIDSelector != nil {
+		in, out := &in.TargetGroupIDSelector, &out.TargetGroupIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedTargetGroupInitParameters.
+func (in *AttachedTargetGroupInitParameters) DeepCopy() *AttachedTargetGroupInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AttachedTargetGroupInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AttachedTargetGroupObservation) DeepCopyInto(out *AttachedTargetGroupObservation) {
+	*out = *in
+	if in.Healthcheck != nil {
+		in, out := &in.Healthcheck, &out.Healthcheck
+		*out = make([]HealthcheckObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TargetGroupID != nil {
+		in, out := &in.TargetGroupID, &out.TargetGroupID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedTargetGroupObservation.
+func (in *AttachedTargetGroupObservation) DeepCopy() *AttachedTargetGroupObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(AttachedTargetGroupObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AttachedTargetGroupParameters) DeepCopyInto(out *AttachedTargetGroupParameters) {
+	*out = *in
+	if in.Healthcheck != nil {
+		in, out := &in.Healthcheck, &out.Healthcheck
+		*out = make([]HealthcheckParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TargetGroupID != nil {
+		in, out := &in.TargetGroupID, &out.TargetGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetGroupIDRef != nil {
+		in, out := &in.TargetGroupIDRef, &out.TargetGroupIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetGroupIDSelector != nil {
+		in, out := &in.TargetGroupIDSelector, &out.TargetGroupIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedTargetGroupParameters.
+func (in *AttachedTargetGroupParameters) DeepCopy() *AttachedTargetGroupParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(AttachedTargetGroupParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalAddressSpecInitParameters) DeepCopyInto(out *ExternalAddressSpecInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAddressSpecInitParameters. +func (in *ExternalAddressSpecInitParameters) DeepCopy() *ExternalAddressSpecInitParameters { + if in == nil { + return nil + } + out := new(ExternalAddressSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalAddressSpecObservation) DeepCopyInto(out *ExternalAddressSpecObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAddressSpecObservation. +func (in *ExternalAddressSpecObservation) DeepCopy() *ExternalAddressSpecObservation { + if in == nil { + return nil + } + out := new(ExternalAddressSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalAddressSpecParameters) DeepCopyInto(out *ExternalAddressSpecParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAddressSpecParameters. +func (in *ExternalAddressSpecParameters) DeepCopy() *ExternalAddressSpecParameters { + if in == nil { + return nil + } + out := new(ExternalAddressSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPOptionsInitParameters) DeepCopyInto(out *HTTPOptionsInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOptionsInitParameters. +func (in *HTTPOptionsInitParameters) DeepCopy() *HTTPOptionsInitParameters { + if in == nil { + return nil + } + out := new(HTTPOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPOptionsObservation) DeepCopyInto(out *HTTPOptionsObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOptionsObservation. 
+func (in *HTTPOptionsObservation) DeepCopy() *HTTPOptionsObservation { + if in == nil { + return nil + } + out := new(HTTPOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPOptionsParameters) DeepCopyInto(out *HTTPOptionsParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOptionsParameters. +func (in *HTTPOptionsParameters) DeepCopy() *HTTPOptionsParameters { + if in == nil { + return nil + } + out := new(HTTPOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckInitParameters) DeepCopyInto(out *HealthcheckInitParameters) { + *out = *in + if in.HTTPOptions != nil { + in, out := &in.HTTPOptions, &out.HTTPOptions + *out = make([]HTTPOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TCPOptions != nil { + in, out := &in.TCPOptions, &out.TCPOptions + *out = make([]TCPOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckInitParameters. +func (in *HealthcheckInitParameters) DeepCopy() *HealthcheckInitParameters { + if in == nil { + return nil + } + out := new(HealthcheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcheckObservation) DeepCopyInto(out *HealthcheckObservation) { + *out = *in + if in.HTTPOptions != nil { + in, out := &in.HTTPOptions, &out.HTTPOptions + *out = make([]HTTPOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TCPOptions != nil { + in, out := &in.TCPOptions, &out.TCPOptions + *out = make([]TCPOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckObservation. +func (in *HealthcheckObservation) DeepCopy() *HealthcheckObservation { + if in == nil { + return nil + } + out := new(HealthcheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcheckParameters) DeepCopyInto(out *HealthcheckParameters) { + *out = *in + if in.HTTPOptions != nil { + in, out := &in.HTTPOptions, &out.HTTPOptions + *out = make([]HTTPOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TCPOptions != nil { + in, out := &in.TCPOptions, &out.TCPOptions + *out = make([]TCPOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcheckParameters. +func (in *HealthcheckParameters) DeepCopy() *HealthcheckParameters { + if in == nil { + return nil + } + out := new(HealthcheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InternalAddressSpecInitParameters) DeepCopyInto(out *InternalAddressSpecInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalAddressSpecInitParameters. +func (in *InternalAddressSpecInitParameters) DeepCopy() *InternalAddressSpecInitParameters { + if in == nil { + return nil + } + out := new(InternalAddressSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalAddressSpecObservation) DeepCopyInto(out *InternalAddressSpecObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalAddressSpecObservation. +func (in *InternalAddressSpecObservation) DeepCopy() *InternalAddressSpecObservation { + if in == nil { + return nil + } + out := new(InternalAddressSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalAddressSpecParameters) DeepCopyInto(out *InternalAddressSpecParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalAddressSpecParameters. +func (in *InternalAddressSpecParameters) DeepCopy() *InternalAddressSpecParameters { + if in == nil { + return nil + } + out := new(InternalAddressSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerInitParameters) DeepCopyInto(out *ListenerInitParameters) { + *out = *in + if in.ExternalAddressSpec != nil { + in, out := &in.ExternalAddressSpec, &out.ExternalAddressSpec + *out = make([]ExternalAddressSpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InternalAddressSpec != nil { + in, out := &in.InternalAddressSpec, &out.InternalAddressSpec + *out = make([]InternalAddressSpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerInitParameters. +func (in *ListenerInitParameters) DeepCopy() *ListenerInitParameters { + if in == nil { + return nil + } + out := new(ListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerObservation) DeepCopyInto(out *ListenerObservation) { + *out = *in + if in.ExternalAddressSpec != nil { + in, out := &in.ExternalAddressSpec, &out.ExternalAddressSpec + *out = make([]ExternalAddressSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InternalAddressSpec != nil { + in, out := &in.InternalAddressSpec, &out.InternalAddressSpec + *out = make([]InternalAddressSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerObservation. +func (in *ListenerObservation) DeepCopy() *ListenerObservation { + if in == nil { + return nil + } + out := new(ListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerParameters) DeepCopyInto(out *ListenerParameters) { + *out = *in + if in.ExternalAddressSpec != nil { + in, out := &in.ExternalAddressSpec, &out.ExternalAddressSpec + *out = make([]ExternalAddressSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InternalAddressSpec != nil { + in, out := &in.InternalAddressSpec, &out.InternalAddressSpec + *out = make([]InternalAddressSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerParameters. +func (in *ListenerParameters) DeepCopy() *ListenerParameters { + if in == nil { + return nil + } + out := new(ListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkLoadBalancer) DeepCopyInto(out *NetworkLoadBalancer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancer. +func (in *NetworkLoadBalancer) DeepCopy() *NetworkLoadBalancer { + if in == nil { + return nil + } + out := new(NetworkLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkLoadBalancer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkLoadBalancerInitParameters) DeepCopyInto(out *NetworkLoadBalancerInitParameters) { + *out = *in + if in.AttachedTargetGroup != nil { + in, out := &in.AttachedTargetGroup, &out.AttachedTargetGroup + *out = make([]AttachedTargetGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancerInitParameters. +func (in *NetworkLoadBalancerInitParameters) DeepCopy() *NetworkLoadBalancerInitParameters { + if in == nil { + return nil + } + out := new(NetworkLoadBalancerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkLoadBalancerList) DeepCopyInto(out *NetworkLoadBalancerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkLoadBalancer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancerList. +func (in *NetworkLoadBalancerList) DeepCopy() *NetworkLoadBalancerList { + if in == nil { + return nil + } + out := new(NetworkLoadBalancerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkLoadBalancerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkLoadBalancerObservation) DeepCopyInto(out *NetworkLoadBalancerObservation) { + *out = *in + if in.AttachedTargetGroup != nil { + in, out := &in.AttachedTargetGroup, &out.AttachedTargetGroup + *out = make([]AttachedTargetGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancerObservation. +func (in *NetworkLoadBalancerObservation) DeepCopy() *NetworkLoadBalancerObservation { + if in == nil { + return nil + } + out := new(NetworkLoadBalancerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkLoadBalancerParameters) DeepCopyInto(out *NetworkLoadBalancerParameters) { + *out = *in + if in.AttachedTargetGroup != nil { + in, out := &in.AttachedTargetGroup, &out.AttachedTargetGroup + *out = make([]AttachedTargetGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancerParameters. +func (in *NetworkLoadBalancerParameters) DeepCopy() *NetworkLoadBalancerParameters { + if in == nil { + return nil + } + out := new(NetworkLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkLoadBalancerSpec) DeepCopyInto(out *NetworkLoadBalancerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancerSpec. +func (in *NetworkLoadBalancerSpec) DeepCopy() *NetworkLoadBalancerSpec { + if in == nil { + return nil + } + out := new(NetworkLoadBalancerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkLoadBalancerStatus) DeepCopyInto(out *NetworkLoadBalancerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLoadBalancerStatus. +func (in *NetworkLoadBalancerStatus) DeepCopy() *NetworkLoadBalancerStatus { + if in == nil { + return nil + } + out := new(NetworkLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPOptionsInitParameters) DeepCopyInto(out *TCPOptionsInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPOptionsInitParameters. +func (in *TCPOptionsInitParameters) DeepCopy() *TCPOptionsInitParameters { + if in == nil { + return nil + } + out := new(TCPOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPOptionsObservation) DeepCopyInto(out *TCPOptionsObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPOptionsObservation. +func (in *TCPOptionsObservation) DeepCopy() *TCPOptionsObservation { + if in == nil { + return nil + } + out := new(TCPOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPOptionsParameters) DeepCopyInto(out *TCPOptionsParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPOptionsParameters. +func (in *TCPOptionsParameters) DeepCopy() *TCPOptionsParameters { + if in == nil { + return nil + } + out := new(TCPOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroup) DeepCopyInto(out *TargetGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroup. +func (in *TargetGroup) DeepCopy() *TargetGroup { + if in == nil { + return nil + } + out := new(TargetGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TargetGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupInitParameters) DeepCopyInto(out *TargetGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]TargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInitParameters. +func (in *TargetGroupInitParameters) DeepCopy() *TargetGroupInitParameters { + if in == nil { + return nil + } + out := new(TargetGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupList) DeepCopyInto(out *TargetGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TargetGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupList. +func (in *TargetGroupList) DeepCopy() *TargetGroupList { + if in == nil { + return nil + } + out := new(TargetGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TargetGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
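// Illustrative sketch of the labels-copy loop repeated in the generated
// code above: label values are *string, so a correct deep copy re-allocates
// each non-nil value and preserves nil entries; a plain map assignment
// would leave both objects sharing the same strings.
package main

import "fmt"

func deepCopyLabels(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for k, v := range in {
		if v == nil {
			out[k] = nil // keep explicit nil values as-is
			continue
		}
		nv := *v
		out[k] = &nv
	}
	return out
}

func main() {
	env := "prod"
	in := map[string]*string{"env": &env, "team": nil}
	out := deepCopyLabels(in)
	*out["env"] = "dev"
	fmt.Println(*in["env"]) // still "prod": the copy owns its own values
}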
+func (in *TargetGroupObservation) DeepCopyInto(out *TargetGroupObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]TargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupObservation. +func (in *TargetGroupObservation) DeepCopy() *TargetGroupObservation { + if in == nil { + return nil + } + out := new(TargetGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupParameters) DeepCopyInto(out *TargetGroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]TargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupParameters. +func (in *TargetGroupParameters) DeepCopy() *TargetGroupParameters { + if in == nil { + return nil + } + out := new(TargetGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupSpec) DeepCopyInto(out *TargetGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupSpec. +func (in *TargetGroupSpec) DeepCopy() *TargetGroupSpec { + if in == nil { + return nil + } + out := new(TargetGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupStatus) DeepCopyInto(out *TargetGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupStatus. +func (in *TargetGroupStatus) DeepCopy() *TargetGroupStatus { + if in == nil { + return nil + } + out := new(TargetGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. +func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/lb/v1alpha1/zz_generated.resolvers.go b/apis/lb/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..0be95fa --- /dev/null +++ b/apis/lb/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,210 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this NetworkLoadBalancer. +func (mg *NetworkLoadBalancer) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.AttachedTargetGroup); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AttachedTargetGroup[i3].TargetGroupID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AttachedTargetGroup[i3].TargetGroupIDRef, + Selector: mg.Spec.ForProvider.AttachedTargetGroup[i3].TargetGroupIDSelector, + To: reference.To{ + List: &TargetGroupList{}, + Managed: &TargetGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AttachedTargetGroup[i3].TargetGroupID") + } + mg.Spec.ForProvider.AttachedTargetGroup[i3].TargetGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AttachedTargetGroup[i3].TargetGroupIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Listener); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Listener[i3].InternalAddressSpec); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Listener[i3].InternalAddressSpec[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Listener[i3].InternalAddressSpec[i4].SubnetIDRef, + Selector: 
mg.Spec.ForProvider.Listener[i3].InternalAddressSpec[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Listener[i3].InternalAddressSpec[i4].SubnetID") + } + mg.Spec.ForProvider.Listener[i3].InternalAddressSpec[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Listener[i3].InternalAddressSpec[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.AttachedTargetGroup); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AttachedTargetGroup[i3].TargetGroupID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AttachedTargetGroup[i3].TargetGroupIDRef, + Selector: mg.Spec.InitProvider.AttachedTargetGroup[i3].TargetGroupIDSelector, + To: reference.To{ + List: &TargetGroupList{}, + Managed: &TargetGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AttachedTargetGroup[i3].TargetGroupID") + } + mg.Spec.InitProvider.AttachedTargetGroup[i3].TargetGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AttachedTargetGroup[i3].TargetGroupIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Listener); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Listener[i3].InternalAddressSpec); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Listener[i3].InternalAddressSpec[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Listener[i3].InternalAddressSpec[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.Listener[i3].InternalAddressSpec[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Listener[i3].InternalAddressSpec[i4].SubnetID") + } + mg.Spec.InitProvider.Listener[i3].InternalAddressSpec[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Listener[i3].InternalAddressSpec[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this TargetGroup. 
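// Illustrative, simplified model of the resolution that the generated
// ResolveReferences above delegates to crossplane-runtime's
// reference.APIResolver: an explicitly set ID wins, then a named Ref is
// looked up, then a Selector picks by labels. Types and logic here are
// hypothetical stand-ins, not the real resolver.
package main

import "fmt"

type managed struct {
	name   string
	labels map[string]string
	extID  string // external name, e.g. the cloud resource ID
}

func resolve(current, refName string, selector map[string]string, objs []managed) (string, error) {
	if current != "" {
		return current, nil // already set: nothing to do
	}
	if refName != "" {
		for _, o := range objs {
			if o.name == refName {
				return o.extID, nil
			}
		}
		return "", fmt.Errorf("referenced object %q not found", refName)
	}
	for _, o := range objs {
		match := len(selector) > 0
		for k, v := range selector {
			if o.labels[k] != v {
				match = false
				break
			}
		}
		if match {
			return o.extID, nil
		}
	}
	return "", fmt.Errorf("no object matched the selector")
}

func main() {
	objs := []managed{{name: "tg-a", labels: map[string]string{"env": "prod"}, extID: "enp123"}}
	id, _ := resolve("", "tg-a", nil, objs)
	fmt.Println(id) // "enp123" is what gets written back into targetGroupId
}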
+func (mg *TargetGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Target); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Target[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Target[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Target[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Target[i3].SubnetID") + } + mg.Spec.ForProvider.Target[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Target[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Target); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Target[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Target[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Target[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Target[i3].SubnetID") + } + mg.Spec.InitProvider.Target[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Target[i3].SubnetIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/lb/v1alpha1/zz_groupversion_info.go b/apis/lb/v1alpha1/zz_groupversion_info.go index 9060a6b..87a7bee 100755 --- a/apis/lb/v1alpha1/zz_groupversion_info.go +++ b/apis/lb/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/lb/v1alpha1/zz_networkloadbalancer_terraformed.go b/apis/lb/v1alpha1/zz_networkloadbalancer_terraformed.go index bb440b7..8718bb3 100755 --- a/apis/lb/v1alpha1/zz_networkloadbalancer_terraformed.go +++ b/apis/lb/v1alpha1/zz_networkloadbalancer_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this NetworkLoadBalancer func (mg *NetworkLoadBalancer) GetTerraformResourceType() string { - return "yandex_lb_network_load_balancer" + return "yandex_lb_network_load_balancer" } // GetConnectionDetailsMapping for this NetworkLoadBalancer func (tr *NetworkLoadBalancer) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this NetworkLoadBalancer func (tr *NetworkLoadBalancer) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this NetworkLoadBalancer func (tr *NetworkLoadBalancer) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this NetworkLoadBalancer func (tr *NetworkLoadBalancer) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this NetworkLoadBalancer func (tr *NetworkLoadBalancer) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this NetworkLoadBalancer func (tr *NetworkLoadBalancer) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this NetworkLoadBalancer func (tr *NetworkLoadBalancer) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this NetworkLoadBalancer func (tr *NetworkLoadBalancer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", 
tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this NetworkLoadBalancer using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *NetworkLoadBalancer) LateInitialize(attrs []byte) (bool, error) { - params := &NetworkLoadBalancerParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &NetworkLoadBalancerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *NetworkLoadBalancer) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/lb/v1alpha1/zz_networkloadbalancer_types.go b/apis/lb/v1alpha1/zz_networkloadbalancer_types.go index 56a5227..5941f0c 100755 --- a/apis/lb/v1alpha1/zz_networkloadbalancer_types.go +++ b/apis/lb/v1alpha1/zz_networkloadbalancer_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,518 +7,467 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AttachedTargetGroupInitParameters struct { + // A HealthCheck resource. The structure is documented below. + Healthcheck []HealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// A HealthCheck resource. The structure is documented below. 
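// Illustrative, runnable sketch of the merge performed by
// GetMergedParameters above: forProvider stays authoritative and
// initProvider only fills gaps. The module path is an assumption (mergo has
// lived at github.com/imdario/mergo and now dario.cat/mergo); the option
// juggling mirrors the generated code's own note.
package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	params := map[string]any{"name": "from-forProvider"}
	initParams := map[string]any{"name": "from-initProvider", "region_id": "ru-central1"}

	// WithSliceDeepCopy deep-copies slices but also flips Overwrite on, so
	// the extra option turns it back off to keep forProvider values intact.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["name"], params["region_id"]) // from-forProvider ru-central1
}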
-Healthcheck []HealthcheckInitParameters `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// ID of the target group. -// +crossplane:generate:reference:type=TargetGroup -TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"` + // ID of the target group. + // +crossplane:generate:reference:type=TargetGroup + TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"` -// Reference to a TargetGroup to populate targetGroupId. -// +kubebuilder:validation:Optional -TargetGroupIDRef *v1.Reference `json:"targetGroupIdRef,omitempty" tf:"-"` + // Reference to a TargetGroup to populate targetGroupId. + // +kubebuilder:validation:Optional + TargetGroupIDRef *v1.Reference `json:"targetGroupIdRef,omitempty" tf:"-"` -// Selector for a TargetGroup to populate targetGroupId. -// +kubebuilder:validation:Optional -TargetGroupIDSelector *v1.Selector `json:"targetGroupIdSelector,omitempty" tf:"-"` + // Selector for a TargetGroup to populate targetGroupId. + // +kubebuilder:validation:Optional + TargetGroupIDSelector *v1.Selector `json:"targetGroupIdSelector,omitempty" tf:"-"` } - type AttachedTargetGroupObservation struct { + // A HealthCheck resource. The structure is documented below. + Healthcheck []HealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` -// A HealthCheck resource. The structure is documented below. -Healthcheck []HealthcheckObservation `json:"healthcheck,omitempty" tf:"healthcheck,omitempty"` - -// ID of the target group. -TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"` + // ID of the target group. + TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"` } - type AttachedTargetGroupParameters struct { + // A HealthCheck resource. The structure is documented below. + // +kubebuilder:validation:Optional + Healthcheck []HealthcheckParameters `json:"healthcheck" tf:"healthcheck,omitempty"` -// A HealthCheck resource. The structure is documented below. -// +kubebuilder:validation:Optional -Healthcheck []HealthcheckParameters `json:"healthcheck" tf:"healthcheck,omitempty"` - -// ID of the target group. -// +crossplane:generate:reference:type=TargetGroup -// +kubebuilder:validation:Optional -TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"` + // ID of the target group. + // +crossplane:generate:reference:type=TargetGroup + // +kubebuilder:validation:Optional + TargetGroupID *string `json:"targetGroupId,omitempty" tf:"target_group_id,omitempty"` -// Reference to a TargetGroup to populate targetGroupId. -// +kubebuilder:validation:Optional -TargetGroupIDRef *v1.Reference `json:"targetGroupIdRef,omitempty" tf:"-"` + // Reference to a TargetGroup to populate targetGroupId. + // +kubebuilder:validation:Optional + TargetGroupIDRef *v1.Reference `json:"targetGroupIdRef,omitempty" tf:"-"` -// Selector for a TargetGroup to populate targetGroupId. -// +kubebuilder:validation:Optional -TargetGroupIDSelector *v1.Selector `json:"targetGroupIdSelector,omitempty" tf:"-"` + // Selector for a TargetGroup to populate targetGroupId. + // +kubebuilder:validation:Optional + TargetGroupIDSelector *v1.Selector `json:"targetGroupIdSelector,omitempty" tf:"-"` } - type ExternalAddressSpecInitParameters struct { + // Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. 
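// Illustrative sketch of how the fields above are meant to be used
// together: set targetGroupId directly, or leave it nil and give a
// Reference (or a Selector) for the generated ResolveReferences to fill in.
// The local struct is a hypothetical stand-in; only v1.Reference comes from
// crossplane-runtime, matching the import in this file.
package main

import (
	"fmt"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

type attachedTargetGroup struct {
	TargetGroupID    *string
	TargetGroupIDRef *v1.Reference
}

func main() {
	atg := attachedTargetGroup{
		// No TargetGroupID here: the resolver copies the referenced
		// TargetGroup's external name into it during reconciliation.
		TargetGroupIDRef: &v1.Reference{Name: "my-target-group"},
	}
	fmt.Println(atg.TargetGroupID == nil, atg.TargetGroupIDRef.Name)
}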
+ Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. -IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` + // IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. + IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` } - type ExternalAddressSpecObservation struct { + // Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. -IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` + // IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. + IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` } - type ExternalAddressSpecParameters struct { + // Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. -// +kubebuilder:validation:Optional -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. -// +kubebuilder:validation:Optional -IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` + // IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. + // +kubebuilder:validation:Optional + IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` } - type HTTPOptionsInitParameters struct { + // URL path to set for health checking requests for every target in the target group. For example /ping. The default path is /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` -// URL path to set for health checking requests for every target in the target group. For example /ping. The default path is /. -Path *string `json:"path,omitempty" tf:"path,omitempty"` - -// Port to use for TCP health checks. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port to use for TCP health checks. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` } - type HTTPOptionsObservation struct { + // URL path to set for health checking requests for every target in the target group. For example /ping. The default path is /. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` -// URL path to set for health checking requests for every target in the target group. For example /ping. The default path is /. -Path *string `json:"path,omitempty" tf:"path,omitempty"` - -// Port to use for TCP health checks. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port to use for TCP health checks. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` } - type HTTPOptionsParameters struct { + // URL path to set for health checking requests for every target in the target group. For example /ping. The default path is /. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` -// URL path to set for health checking requests for every target in the target group. For example /ping. The default path is /. -// +kubebuilder:validation:Optional -Path *string `json:"path,omitempty" tf:"path,omitempty"` - -// Port to use for TCP health checks. -// +kubebuilder:validation:Optional -Port *float64 `json:"port" tf:"port,omitempty"` + // Port to use for TCP health checks. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` } - type HealthcheckInitParameters struct { + // Options for HTTP health check. The structure is documented below. + HTTPOptions []HTTPOptionsInitParameters `json:"httpOptions,omitempty" tf:"http_options,omitempty"` -// Options for HTTP health check. The structure is documented below. -HTTPOptions []HTTPOptionsInitParameters `json:"httpOptions,omitempty" tf:"http_options,omitempty"` + // Number of successful health checks required in order to set the HEALTHY status for the target. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// Number of successful health checks required in order to set the HEALTHY status for the target. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // The interval between health checks. The default is 2 seconds. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` -// The interval between health checks. The default is 2 seconds. -Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + // Name of the network load balancer. Provided by the client when the network load balancer is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the network load balancer. Provided by the client when the network load balancer is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Options for TCP health check. The structure is documented below. + TCPOptions []TCPOptionsInitParameters `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"` -// Options for TCP health check. The structure is documented below. -TCPOptions []TCPOptionsInitParameters `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"` + // Timeout for a target to return a response for the health check. The default is 1 second. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Timeout for a target to return a response for the health check. The default is 1 second. -Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` - -// Number of failed health checks before changing the status to UNHEALTHY. The default is 2. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of failed health checks before changing the status to UNHEALTHY. The default is 2. 
+ UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HealthcheckObservation struct { + // Options for HTTP health check. The structure is documented below. + HTTPOptions []HTTPOptionsObservation `json:"httpOptions,omitempty" tf:"http_options,omitempty"` -// Options for HTTP health check. The structure is documented below. -HTTPOptions []HTTPOptionsObservation `json:"httpOptions,omitempty" tf:"http_options,omitempty"` - -// Number of successful health checks required in order to set the HEALTHY status for the target. -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of successful health checks required in order to set the HEALTHY status for the target. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// The interval between health checks. The default is 2 seconds. -Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + // The interval between health checks. The default is 2 seconds. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` -// Name of the network load balancer. Provided by the client when the network load balancer is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the network load balancer. Provided by the client when the network load balancer is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Options for TCP health check. The structure is documented below. -TCPOptions []TCPOptionsObservation `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"` + // Options for TCP health check. The structure is documented below. + TCPOptions []TCPOptionsObservation `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"` -// Timeout for a target to return a response for the health check. The default is 1 second. -Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Timeout for a target to return a response for the health check. The default is 1 second. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Number of failed health checks before changing the status to UNHEALTHY. The default is 2. -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of failed health checks before changing the status to UNHEALTHY. The default is 2. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type HealthcheckParameters struct { + // Options for HTTP health check. The structure is documented below. + // +kubebuilder:validation:Optional + HTTPOptions []HTTPOptionsParameters `json:"httpOptions,omitempty" tf:"http_options,omitempty"` -// Options for HTTP health check. The structure is documented below. -// +kubebuilder:validation:Optional -HTTPOptions []HTTPOptionsParameters `json:"httpOptions,omitempty" tf:"http_options,omitempty"` - -// Number of successful health checks required in order to set the HEALTHY status for the target. -// +kubebuilder:validation:Optional -HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + // Number of successful health checks required in order to set the HEALTHY status for the target. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` -// The interval between health checks. The default is 2 seconds. 
-// +kubebuilder:validation:Optional -Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + // The interval between health checks. The default is 2 seconds. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` -// Name of the network load balancer. Provided by the client when the network load balancer is created. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Name of the network load balancer. Provided by the client when the network load balancer is created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Options for TCP health check. The structure is documented below. -// +kubebuilder:validation:Optional -TCPOptions []TCPOptionsParameters `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"` + // Options for TCP health check. The structure is documented below. + // +kubebuilder:validation:Optional + TCPOptions []TCPOptionsParameters `json:"tcpOptions,omitempty" tf:"tcp_options,omitempty"` -// Timeout for a target to return a response for the health check. The default is 1 second. -// +kubebuilder:validation:Optional -Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + // Timeout for a target to return a response for the health check. The default is 1 second. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` -// Number of failed health checks before changing the status to UNHEALTHY. The default is 2. -// +kubebuilder:validation:Optional -UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` + // Number of failed health checks before changing the status to UNHEALTHY. The default is 2. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` } - type InternalAddressSpecInitParameters struct { + // Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. -IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` + // IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. + IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` -// ID of the subnet to which the internal IP address belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // ID of the subnet to which the internal IP address belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. 
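// Illustrative sketch of the json-tag split visible in the structs above:
// fields the schema treats as required (Healthcheck's name, HTTPOptions'
// port) keep a bare `json:"..."` tag and so serialize even when nil, while
// ",omitempty" fields drop out of the payload entirely when unset. The opts
// type is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

type opts struct {
	Path *string  `json:"path,omitempty"` // optional: omitted when nil
	Port *float64 `json:"port"`           // required: always emitted
}

func main() {
	b, _ := json.Marshal(opts{})
	fmt.Println(string(b)) // {"port":null}
}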
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type InternalAddressSpecObservation struct { + // Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. -IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` + // IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. + IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` -// ID of the subnet to which the internal IP address belongs. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // ID of the subnet to which the internal IP address belongs. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type InternalAddressSpecParameters struct { + // Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// Internal IP address for a listener. Must belong to the subnet that is referenced in subnet_id. IP address will be allocated if it wasn't been set. -// +kubebuilder:validation:Optional -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. + // +kubebuilder:validation:Optional + IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` -// IP version of the internal addresses that the load balancer works with. Must be one of ipv4 or ipv6. The default is ipv4. -// +kubebuilder:validation:Optional -IPVersion *string `json:"ipVersion,omitempty" tf:"ip_version,omitempty"` + // ID of the subnet to which the internal IP address belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the subnet to which the internal IP address belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type ListenerInitParameters struct { + // External IP address specification. The structure is documented below. + ExternalAddressSpec []ExternalAddressSpecInitParameters `json:"externalAddressSpec,omitempty" tf:"external_address_spec,omitempty"` -// External IP address specification. The structure is documented below. -ExternalAddressSpec []ExternalAddressSpecInitParameters `json:"externalAddressSpec,omitempty" tf:"external_address_spec,omitempty"` + // Internal IP address specification. The structure is documented below. + InternalAddressSpec []InternalAddressSpecInitParameters `json:"internalAddressSpec,omitempty" tf:"internal_address_spec,omitempty"` -// Internal IP address specification. The structure is documented below. -InternalAddressSpec []InternalAddressSpecInitParameters `json:"internalAddressSpec,omitempty" tf:"internal_address_spec,omitempty"` + // Name of the listener. The name must be unique for each listener on a single load balancer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the listener. The name must be unique for each listener on a single load balancer. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Protocol for incoming traffic. TCP or UDP and the default is TCP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` -// Protocol for incoming traffic. TCP or UDP and the default is TCP. -Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` - -// Port of a target. The default is the same as listener's port. -TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` + // Port of a target. The default is the same as listener's port. + TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` } - type ListenerObservation struct { + // External IP address specification. The structure is documented below. + ExternalAddressSpec []ExternalAddressSpecObservation `json:"externalAddressSpec,omitempty" tf:"external_address_spec,omitempty"` -// External IP address specification. The structure is documented below. -ExternalAddressSpec []ExternalAddressSpecObservation `json:"externalAddressSpec,omitempty" tf:"external_address_spec,omitempty"` + // Internal IP address specification. The structure is documented below. + InternalAddressSpec []InternalAddressSpecObservation `json:"internalAddressSpec,omitempty" tf:"internal_address_spec,omitempty"` -// Internal IP address specification. The structure is documented below. -InternalAddressSpec []InternalAddressSpecObservation `json:"internalAddressSpec,omitempty" tf:"internal_address_spec,omitempty"` + // Name of the listener. The name must be unique for each listener on a single load balancer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the listener. The name must be unique for each listener on a single load balancer. 
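// Illustrative sketch of selecting a listener's internal subnet by labels
// rather than by ID, matching the InternalAddressSpec fields above. The
// struct is a hypothetical stand-in; only v1.Selector comes from
// crossplane-runtime.
package main

import (
	"fmt"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

type internalAddressSpec struct {
	IPVersion        *string
	SubnetIDSelector *v1.Selector
}

func strPtr(s string) *string { return &s }

func main() {
	addr := internalAddressSpec{
		IPVersion: strPtr("ipv4"), // the documented default
		SubnetIDSelector: &v1.Selector{
			MatchLabels: map[string]string{"tier": "internal"},
		},
	}
	fmt.Println(*addr.IPVersion, addr.SubnetIDSelector.MatchLabels)
}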
-Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Port for incoming traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` -// Port for incoming traffic. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Protocol for incoming traffic. TCP or UDP and the default is TCP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` -// Protocol for incoming traffic. TCP or UDP and the default is TCP. -Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` - -// Port of a target. The default is the same as listener's port. -TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` + // Port of a target. The default is the same as listener's port. + TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` } - type ListenerParameters struct { + // External IP address specification. The structure is documented below. + // +kubebuilder:validation:Optional + ExternalAddressSpec []ExternalAddressSpecParameters `json:"externalAddressSpec,omitempty" tf:"external_address_spec,omitempty"` -// External IP address specification. The structure is documented below. -// +kubebuilder:validation:Optional -ExternalAddressSpec []ExternalAddressSpecParameters `json:"externalAddressSpec,omitempty" tf:"external_address_spec,omitempty"` - -// Internal IP address specification. The structure is documented below. -// +kubebuilder:validation:Optional -InternalAddressSpec []InternalAddressSpecParameters `json:"internalAddressSpec,omitempty" tf:"internal_address_spec,omitempty"` + // Internal IP address specification. The structure is documented below. + // +kubebuilder:validation:Optional + InternalAddressSpec []InternalAddressSpecParameters `json:"internalAddressSpec,omitempty" tf:"internal_address_spec,omitempty"` -// Name of the listener. The name must be unique for each listener on a single load balancer. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Name of the listener. The name must be unique for each listener on a single load balancer. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// Port for incoming traffic. -// +kubebuilder:validation:Optional -Port *float64 `json:"port" tf:"port,omitempty"` + // Port for incoming traffic. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` -// Protocol for incoming traffic. TCP or UDP and the default is TCP. -// +kubebuilder:validation:Optional -Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + // Protocol for incoming traffic. TCP or UDP and the default is TCP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` -// Port of a target. The default is the same as listener's port. -// +kubebuilder:validation:Optional -TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` + // Port of a target. The default is the same as listener's port. + // +kubebuilder:validation:Optional + TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` } - type NetworkLoadBalancerInitParameters struct { + // An AttachedTargetGroup resource. The structure is documented below. + AttachedTargetGroup []AttachedTargetGroupInitParameters `json:"attachedTargetGroup,omitempty" tf:"attached_target_group,omitempty"` -// An AttachedTargetGroup resource. The structure is documented below. 
-AttachedTargetGroup []AttachedTargetGroupInitParameters `json:"attachedTargetGroup,omitempty" tf:"attached_target_group,omitempty"` - -// Flag that protects the network load balancer from accidental deletion. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Flag that protects the network load balancer from accidental deletion. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// An optional description of the network load balancer. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the network load balancer. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this network load balancer. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this network load balancer. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Listener specification that will be used by a network load balancer. The structure is documented below. -Listener []ListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` + // Listener specification that will be used by a network load balancer. The structure is documented below. + Listener []ListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` -// Name of the network load balancer. Provided by the client when the network load balancer is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the network load balancer. Provided by the client when the network load balancer is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the availability zone where the network load balancer resides. If omitted, default region is being used. -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the availability zone where the network load balancer resides. If omitted, default region is being used. 
+ RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// Type of the network load balancer. Must be one of 'external' or 'internal'. The default is 'external'. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of the network load balancer. Must be one of 'external' or 'internal'. The default is 'external'. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type NetworkLoadBalancerObservation struct { + // An AttachedTargetGroup resource. The structure is documented below. + AttachedTargetGroup []AttachedTargetGroupObservation `json:"attachedTargetGroup,omitempty" tf:"attached_target_group,omitempty"` -// An AttachedTargetGroup resource. The structure is documented below. -AttachedTargetGroup []AttachedTargetGroupObservation `json:"attachedTargetGroup,omitempty" tf:"attached_target_group,omitempty"` - -// The network load balancer creation timestamp. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // The network load balancer creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Flag that protects the network load balancer from accidental deletion. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Flag that protects the network load balancer from accidental deletion. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// An optional description of the network load balancer. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the network load balancer. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the network load balancer. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The ID of the network load balancer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Labels to assign to this network load balancer. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this network load balancer. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Listener specification that will be used by a network load balancer. The structure is documented below. -Listener []ListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` + // Listener specification that will be used by a network load balancer. The structure is documented below. + Listener []ListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` -// Name of the network load balancer. Provided by the client when the network load balancer is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the network load balancer. Provided by the client when the network load balancer is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the availability zone where the network load balancer resides. If omitted, default region is being used. 
-RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region where the network load balancer resides. If omitted, the default region is used. + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// Type of the network load balancer. Must be one of 'external' or 'internal'. The default is 'external'. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of the network load balancer. Must be one of 'external' or 'internal'. The default is 'external'. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type NetworkLoadBalancerParameters struct { + // An AttachedTargetGroup resource. The structure is documented below. + // +kubebuilder:validation:Optional + AttachedTargetGroup []AttachedTargetGroupParameters `json:"attachedTargetGroup,omitempty" tf:"attached_target_group,omitempty"` -// An AttachedTargetGroup resource. The structure is documented below. -// +kubebuilder:validation:Optional -AttachedTargetGroup []AttachedTargetGroupParameters `json:"attachedTargetGroup,omitempty" tf:"attached_target_group,omitempty"` - -// Flag that protects the network load balancer from accidental deletion. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Flag that protects the network load balancer from accidental deletion. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// An optional description of the network load balancer. Provide this property when you create the resource. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the network load balancer. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this network load balancer. A list of key/value pairs.
-// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this network load balancer. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Listener specification that will be used by a network load balancer. The structure is documented below. -// +kubebuilder:validation:Optional -Listener []ListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` + // Listener specification that will be used by a network load balancer. The structure is documented below. + // +kubebuilder:validation:Optional + Listener []ListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` -// Name of the network load balancer. Provided by the client when the network load balancer is created. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the network load balancer. Provided by the client when the network load balancer is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the availability zone where the network load balancer resides. If omitted, default region is being used. -// +kubebuilder:validation:Optional -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region where the network load balancer resides. If omitted, the default region is used. + // +kubebuilder:validation:Optional + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// Type of the network load balancer. Must be one of 'external' or 'internal'. The default is 'external'. -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of the network load balancer. Must be one of 'external' or 'internal'. The default is 'external'. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type TCPOptionsInitParameters struct { - -// Port to use for TCP health checks. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port to use for TCP health checks. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` } - type TCPOptionsObservation struct { - -// Port to use for TCP health checks. -Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + // Port to use for TCP health checks. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` } - type TCPOptionsParameters struct { - -// Port to use for TCP health checks. -// +kubebuilder:validation:Optional -Port *float64 `json:"port" tf:"port,omitempty"` + // Port to use for TCP health checks. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` } // NetworkLoadBalancerSpec defines the desired state of NetworkLoadBalancer type NetworkLoadBalancerSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider NetworkLoadBalancerParameters `json:"forProvider"` + ForProvider NetworkLoadBalancerParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -531,20 +478,19 @@ type NetworkLoadBalancerSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler.
- InitProvider NetworkLoadBalancerInitParameters `json:"initProvider,omitempty"` + InitProvider NetworkLoadBalancerInitParameters `json:"initProvider,omitempty"` } // NetworkLoadBalancerStatus defines the observed state of NetworkLoadBalancer. type NetworkLoadBalancerStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider NetworkLoadBalancerObservation `json:"atProvider,omitempty"` + AtProvider NetworkLoadBalancerObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // NetworkLoadBalancer is the Schema for the NetworkLoadBalancers API. A network load balancer is used to evenly distribute the load across cloud resources. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/lb/v1alpha1/zz_targetgroup_terraformed.go b/apis/lb/v1alpha1/zz_targetgroup_terraformed.go index a7168e3..ec494f9 100755 --- a/apis/lb/v1alpha1/zz_targetgroup_terraformed.go +++ b/apis/lb/v1alpha1/zz_targetgroup_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this TargetGroup func (mg *TargetGroup) GetTerraformResourceType() string { - return "yandex_lb_target_group" + return "yandex_lb_target_group" } // GetConnectionDetailsMapping for this TargetGroup func (tr *TargetGroup) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this TargetGroup func (tr *TargetGroup) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this TargetGroup func (tr *TargetGroup) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this TargetGroup func (tr *TargetGroup) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this TargetGroup func (tr *TargetGroup) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this TargetGroup func (tr *TargetGroup) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this TargetGroup func (tr *TargetGroup) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this TargetGroup func (tr *TargetGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this TargetGroup using its observed tfState. // returns true if there are any spec changes for the resource. func (tr *TargetGroup) LateInitialize(attrs []byte) (bool, error) { - params := &TargetGroupParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &TargetGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *TargetGroup) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/lb/v1alpha1/zz_targetgroup_types.go b/apis/lb/v1alpha1/zz_targetgroup_types.go index ed02e26..6c7dab6 100755 --- a/apis/lb/v1alpha1/zz_targetgroup_types.go +++ b/apis/lb/v1alpha1/zz_targetgroup_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,170 +7,155 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type TargetGroupInitParameters struct { + // An optional description of the target group. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the target group. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this target group. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this target group. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the target group. Provided by the client when the target group is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the target group. Provided by the client when the target group is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the availability zone where the target group resides. If omitted, default region is being used. 
-RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region where the target group resides. If omitted, the default region is used. + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// A Target resource. The structure is documented below. -Target []TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` + // A Target resource. The structure is documented below. + Target []TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` } - type TargetGroupObservation struct { + // The target group creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// The target group creation timestamp. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` - -// An optional description of the target group. Provide this property when you create the resource. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // An optional description of the target group. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the target group. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The ID of the target group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Labels to assign to this target group. A list of key/value pairs. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this target group. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the target group. Provided by the client when the target group is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the target group. Provided by the client when the target group is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the availability zone where the target group resides. If omitted, default region is being used. -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region where the target group resides. If omitted, the default region is used. + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// A Target resource. The structure is documented below. -Target []TargetObservation `json:"target,omitempty" tf:"target,omitempty"` + // A Target resource. The structure is documented below. + Target []TargetObservation `json:"target,omitempty" tf:"target,omitempty"` } - type TargetGroupParameters struct { + // An optional description of the target group. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// An optional description of the target group. Provide this property when you create the resource. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// The ID of the folder to which the resource belongs. If omitted, the provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Labels to assign to this target group. A list of key/value pairs. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels to assign to this target group. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the target group. Provided by the client when the target group is created. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the target group. Provided by the client when the target group is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the availability zone where the target group resides. If omitted, default region is being used. -// +kubebuilder:validation:Optional -RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` + // ID of the region where the target group resides. If omitted, the default region is used. + // +kubebuilder:validation:Optional + RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"` -// A Target resource. The structure is documented below. -// +kubebuilder:validation:Optional -Target []TargetParameters `json:"target,omitempty" tf:"target,omitempty"` + // A Target resource. The structure is documented below. + // +kubebuilder:validation:Optional + Target []TargetParameters `json:"target,omitempty" tf:"target,omitempty"` } - type TargetInitParameters struct { + // IP address of the target. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// IP address of the target. -Address *string `json:"address,omitempty" tf:"address,omitempty"` + // ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type TargetObservation struct { + // IP address of the target. + Address *string `json:"address,omitempty" tf:"address,omitempty"` -// IP address of the target. -Address *string `json:"address,omitempty" tf:"address,omitempty"` - -// ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type TargetParameters struct { + // IP address of the target. + // +kubebuilder:validation:Optional + Address *string `json:"address" tf:"address,omitempty"` -// IP address of the target. -// +kubebuilder:validation:Optional -Address *string `json:"address" tf:"address,omitempty"` + // ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// ID of the subnet that targets are connected to. All targets in the target group must be connected to the same subnet within a single availability zone. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } // TargetGroupSpec defines the desired state of TargetGroup type TargetGroupSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider TargetGroupParameters `json:"forProvider"` + ForProvider TargetGroupParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. 
// InitProvider holds the same fields as ForProvider, with the exception @@ -183,20 +166,19 @@ type TargetGroupSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider TargetGroupInitParameters `json:"initProvider,omitempty"` + InitProvider TargetGroupInitParameters `json:"initProvider,omitempty"` } // TargetGroupStatus defines the observed state of TargetGroup. type TargetGroupStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider TargetGroupObservation `json:"atProvider,omitempty"` + AtProvider TargetGroupObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // TargetGroup is the Schema for the TargetGroups API. A load balancer distributes the load across cloud resources that are combined into a target group. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/loadtesting/v1alpha1/zz_agent_terraformed.go b/apis/loadtesting/v1alpha1/zz_agent_terraformed.go index 966c861..45d36be 100755 --- a/apis/loadtesting/v1alpha1/zz_agent_terraformed.go +++ b/apis/loadtesting/v1alpha1/zz_agent_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Agent func (mg *Agent) GetTerraformResourceType() string { - return "yandex_loadtesting_agent" + return "yandex_loadtesting_agent" } // GetConnectionDetailsMapping for this Agent func (tr *Agent) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Agent func (tr *Agent) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Agent func (tr *Agent) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Agent func (tr *Agent) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Agent func (tr *Agent) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Agent func (tr *Agent)
SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Agent func (tr *Agent) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this Agent func (tr *Agent) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Agent using its observed tfState. // returns true if there are any spec changes for the resource. func (tr *Agent) LateInitialize(attrs []byte) (bool, error) { - params := &AgentParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &AgentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Agent) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/loadtesting/v1alpha1/zz_agent_types.go b/apis/loadtesting/v1alpha1/zz_agent_types.go index 73fe4eb..2a89284 100755 --- a/apis/loadtesting/v1alpha1/zz_agent_types.go +++ b/apis/loadtesting/v1alpha1/zz_agent_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,512 +7,473 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AgentInitParameters struct { + // The template for creating a new compute instance that runs the load testing agent. The structure is documented below. + ComputeInstance []ComputeInstanceInitParameters `json:"computeInstance,omitempty" tf:"compute_instance,omitempty"` -// The template for creating new compute instance running load testing agent. The structure is documented below. -ComputeInstance []ComputeInstanceInitParameters `json:"computeInstance,omitempty" tf:"compute_instance,omitempty"` + // A description of the load testing agent. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A description of the load testing agent. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The ID of the folder that the resources belong to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the folder that the resources belong to. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // A set of key/value label pairs to assign to the agent. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the agent. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// The name of the load testing agent. Must be unique within folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the load testing agent. Must be unique within folder.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type AgentObservation struct { + // The template for creating a new compute instance that runs the load testing agent. The structure is documented below. + ComputeInstance []ComputeInstanceObservation `json:"computeInstance,omitempty" tf:"compute_instance,omitempty"` -// The template for creating new compute instance running load testing agent. The structure is documented below. -ComputeInstance []ComputeInstanceObservation `json:"computeInstance,omitempty" tf:"compute_instance,omitempty"` - -ComputeInstanceID *string `json:"computeInstanceId,omitempty" tf:"compute_instance_id,omitempty"` + ComputeInstanceID *string `json:"computeInstanceId,omitempty" tf:"compute_instance_id,omitempty"` -// A description of the load testing agent. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A description of the load testing agent. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder that the resources belong to. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resources belong to. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the agent. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the agent. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the load testing agent. Must be unique within folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the load testing agent. Must be unique within folder. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type AgentParameters struct { + // The template for creating a new compute instance that runs the load testing agent. The structure is documented below. + // +kubebuilder:validation:Optional + ComputeInstance []ComputeInstanceParameters `json:"computeInstance,omitempty" tf:"compute_instance,omitempty"` -// The template for creating new compute instance running load testing agent. The structure is documented below. -// +kubebuilder:validation:Optional -ComputeInstance []ComputeInstanceParameters `json:"computeInstance,omitempty" tf:"compute_instance,omitempty"` - -// A description of the load testing agent. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A description of the load testing agent. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The ID of the folder that the resources belong to. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resources belong to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the agent. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the agent. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the load testing agent. Must be unique within folder. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the load testing agent. Must be unique within folder. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type BootDiskInitParameters struct { + // Whether the disk is auto-deleted when the instance is deleted. The default value is true. + AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` -// Whether the disk is auto-deleted when the instance is deleted. The default value is true. -AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` - -// This value can be used to reference the device under /dev/disk/by-id/. -DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + // This value can be used to reference the device under /dev/disk/by-id/. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` -// Parameters for creating a disk alongside the instance. The structure is documented below. -InitializeParams []InitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + // Parameters for creating a disk alongside the instance. The structure is documented below. + InitializeParams []InitializeParamsInitParameters `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` } - type BootDiskObservation struct { + // Whether the disk is auto-deleted when the instance is deleted. The default value is true. + AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` -// Whether the disk is auto-deleted when the instance is deleted. The default value is true. -AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` + // This value can be used to reference the device under /dev/disk/by-id/. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` -// This value can be used to reference the device under /dev/disk/by-id/. -DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + // (Computed) The ID of created disk. + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` -// (Computed) The ID of created disk. -DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` - -// Parameters for creating a disk alongside the instance. The structure is documented below. 
-InitializeParams []InitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` + // Parameters for creating a disk alongside the instance. The structure is documented below. + InitializeParams []InitializeParamsObservation `json:"initializeParams,omitempty" tf:"initialize_params,omitempty"` } - type BootDiskParameters struct { + // Whether the disk is auto-deleted when the instance is deleted. The default value is true. + // +kubebuilder:validation:Optional + AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` -// Whether the disk is auto-deleted when the instance is deleted. The default value is true. -// +kubebuilder:validation:Optional -AutoDelete *bool `json:"autoDelete,omitempty" tf:"auto_delete,omitempty"` + // This value can be used to reference the device under /dev/disk/by-id/. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` -// This value can be used to reference the device under /dev/disk/by-id/. -// +kubebuilder:validation:Optional -DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` - -// Parameters for creating a disk alongside the instance. The structure is documented below. -// +kubebuilder:validation:Optional -InitializeParams []InitializeParamsParameters `json:"initializeParams" tf:"initialize_params,omitempty"` + // Parameters for creating a disk alongside the instance. The structure is documented below. + // +kubebuilder:validation:Optional + InitializeParams []InitializeParamsParameters `json:"initializeParams" tf:"initialize_params,omitempty"` } - type ComputeInstanceInitParameters struct { + // Boot disk specifications for the instance. The structure is documented below. + BootDisk []BootDiskInitParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"` -// Boot disk specifications for the instance. The structure is documented below. -BootDisk []BootDiskInitParameters `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"` - -// A set of key/value label pairs to assign to the instance. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the instance. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of metadata key/value pairs to make available from within the instance. -// +mapType=granular -Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + // A set of metadata key/value pairs to make available from within the instance. + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` -// Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below. -NetworkInterface []NetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + // Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below. + NetworkInterface []NetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` -// The Compute platform of virtual machine. If it is not provided, the standard-v2 platform will be used. -PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + // The Compute platform of virtual machine. If it is not provided, the standard-v2 platform will be used. 
+ PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` -// Compute resource specifications for the instance. The structure is documented below. -Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Compute resource specifications for the instance. The structure is documented below. + Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` -// The ID of the service account authorized for this load testing agent. Service account should have loadtesting.generatorClient or loadtesting.externalAgent role in the folder. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // The ID of the service account authorized for this load testing agent. Service account should have loadtesting.generatorClient or loadtesting.externalAgent role in the folder. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Reference to a SecurityGroup in vpc to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + // Reference to a SecurityGroup in vpc to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Selector for a SecurityGroup in vpc to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Selector for a SecurityGroup in vpc to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` -// The availability zone where the virtual machine will be created. If it is not provided, the default provider folder is used. -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type ComputeInstanceObservation struct { + // Boot disk specifications for the instance. The structure is documented below. + BootDisk []BootDiskObservation `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"` -// Boot disk specifications for the instance. The structure is documented below. -BootDisk []BootDiskObservation `json:"bootDisk,omitempty" tf:"boot_disk,omitempty"` - -// (Computed) The set of labels key:value pairs assigned to this instance. This includes user custom labels and predefined items created by Yandex Cloud Load Testing. -// +mapType=granular -ComputedLabels map[string]*string `json:"computedLabels,omitempty" tf:"computed_labels,omitempty"` + // (Computed) The set of labels key:value pairs assigned to this instance. This includes user custom labels and predefined items created by Yandex Cloud Load Testing. + // +mapType=granular + ComputedLabels map[string]*string `json:"computedLabels,omitempty" tf:"computed_labels,omitempty"` -// (Computed) The set of metadata key:value pairs assigned to this instance. This includes user custom metadata, and predefined items created by Yandex Cloud Load Testing.
-// +mapType=granular -ComputedMetadata map[string]*string `json:"computedMetadata,omitempty" tf:"computed_metadata,omitempty"` + // (Computed) The set of metadata key:value pairs assigned to this instance. This includes user custom metadata, and predefined items created by Yandex Cloud Load Testing. + // +mapType=granular + ComputedMetadata map[string]*string `json:"computedMetadata,omitempty" tf:"computed_metadata,omitempty"` -// A set of key/value label pairs to assign to the instance. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the instance. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of metadata key/value pairs to make available from within the instance. -// +mapType=granular -Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + // A set of metadata key/value pairs to make available from within the instance. + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` -// Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below. -NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + // Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below. + NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` -// The Compute platform of virtual machine. If it is not provided, the standard-v2 platform will be used. -PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + // The Compute platform of virtual machine. If it is not provided, the standard-v2 platform will be used. + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` -// Compute resource specifications for the instance. The structure is documented below. -Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Compute resource specifications for the instance. The structure is documented below. + Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` -// The ID of the service account authorized for this load testing agent. Service account should have loadtesting.generatorClient or loadtesting.externalAgent role in the folder. -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // The ID of the service account authorized for this load testing agent. Service account should have loadtesting.generatorClient or loadtesting.externalAgent role in the folder. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// The availability zone where the virtual machine will be created. If it is not provided, the default provider folder is used. -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type ComputeInstanceParameters struct { + // Boot disk specifications for the instance. The structure is documented below.
+ // +kubebuilder:validation:Optional + BootDisk []BootDiskParameters `json:"bootDisk" tf:"boot_disk,omitempty"` -// Boot disk specifications for the instance. The structure is documented below. -// +kubebuilder:validation:Optional -BootDisk []BootDiskParameters `json:"bootDisk" tf:"boot_disk,omitempty"` - -// A set of key/value label pairs to assign to the instance. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the instance. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of metadata key/value pairs to make available from within the instance. -// +kubebuilder:validation:Optional -// +mapType=granular -Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + // A set of metadata key/value pairs to make available from within the instance. + // +kubebuilder:validation:Optional + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` -// Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below. -// +kubebuilder:validation:Optional -NetworkInterface []NetworkInterfaceParameters `json:"networkInterface" tf:"network_interface,omitempty"` + // Network specifications for the instance. This can be used multiple times for adding multiple interfaces. The structure is documented below. + // +kubebuilder:validation:Optional + NetworkInterface []NetworkInterfaceParameters `json:"networkInterface" tf:"network_interface,omitempty"` -// The Compute platform of virtual machine. If it is not provided, the standard-v2 platform will be used. -// +kubebuilder:validation:Optional -PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + // The Compute platform of virtual machine. If it is not provided, the standard-v2 platform will be used. + // +kubebuilder:validation:Optional + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` -// Compute resource specifications for the instance. The structure is documented below. -// +kubebuilder:validation:Optional -Resources []ResourcesParameters `json:"resources" tf:"resources,omitempty"` + // Compute resource specifications for the instance. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []ResourcesParameters `json:"resources" tf:"resources,omitempty"` -// The ID of the service account authorized for this load testing agent. Service account should have loadtesting.generatorClient or loadtesting.externalAgent role in the folder. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // The ID of the service account authorized for this load testing agent. Service account should have loadtesting.generatorClient or loadtesting.externalAgent role in the folder. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Reference to a SecurityGroup in vpc to populate serviceAccountId. 
-// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + // Reference to a SecurityGroup in vpc to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Selector for a SecurityGroup in vpc to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Selector for a SecurityGroup in vpc to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` -// The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used. -// +kubebuilder:validation:Optional -ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + // The availability zone where the virtual machine will be created. If it is not provided, the default provider zone is used. + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` } - type InitializeParamsInitParameters struct { + // Block size of the disk, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` -// Block size of the disk, specified in bytes. -BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` + // A description of the boot disk. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A description of the boot disk. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The name of the load testing agent. Must be unique within folder. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the load testing agent. Must be unique within folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The size of the disk in GB. Defaults to 15 GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` -// The size of the disk in GB. Defaults to 15 GB. -Size *float64 `json:"size,omitempty" tf:"size,omitempty"` - -// The disk type. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // The disk type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type InitializeParamsObservation struct { + // Block size of the disk, specified in bytes. + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` -// Block size of the disk, specified in bytes. -BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` - -// A description of the boot disk. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A description of the boot disk. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The name of the load testing agent. Must be unique within folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the load testing agent. Must be unique within folder. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The size of the disk in GB. Defaults to 15 GB. -Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + // The size of the disk in GB. Defaults to 15 GB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` -// The disk type. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // The disk type.
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type InitializeParamsParameters struct { + // Block size of the disk, specified in bytes. + // +kubebuilder:validation:Optional + BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` -// Block size of the disk, specified in bytes. -// +kubebuilder:validation:Optional -BlockSize *float64 `json:"blockSize,omitempty" tf:"block_size,omitempty"` - -// A description of the boot disk. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A description of the boot disk. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// The name of the load testing agent. Must be unique within folder. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the load testing agent. Must be unique within folder. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The size of the disk in GB. Defaults to 15 GB. -// +kubebuilder:validation:Optional -Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + // The size of the disk in GB. Defaults to 15 GB. + // +kubebuilder:validation:Optional + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` -// The disk type. -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // The disk type. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type NetworkInterfaceInitParameters struct { + // Manually set static IP address. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` -// Manually set static IP address. -IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + // Flag for allocating IPv4 address for the network interface. + IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"` -// Flag for allocating IPv4 address for the network interface. -IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"` + // Flag for allocating IPv6 address for the network interface. + IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"` -// Flag for allocating IPv6 address for the network interface. -IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + // Manually set static IPv6 address. + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` -// Manually set static IPv6 address. -IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` + // Flag for using NAT. + NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"` -// Flag for using NAT. -NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"` + // A public address that can be used to access the internet over NAT. + NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"` -// A public address that can be used to access the internet over NAT. -NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"` + // Security group ids for network interface. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// Security group ids for network interface. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
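The subnetId field and the subnetIdRef/subnetIdSelector pair that follows form the usual Crossplane reference triple: set exactly one of them, and the generated resolver (see zz_generated.resolvers.go below) fills subnetId in before the Terraform call. A hedged sketch of the three spellings, written as if it lived in this v1alpha1 package; ptr, the subnet ID, the object name, and the labels are illustrative:

package v1alpha1

import v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

func ptr[T any](v T) *T { return &v }

// subnetWiringExamples shows the three ways the interface can be pointed at a
// subnet; a real object would set only one of them.
func subnetWiringExamples() []NetworkInterfaceInitParameters {
	return []NetworkInterfaceInitParameters{
		// 1. Literal ID: passed straight through to Terraform.
		{IPv4: ptr(true), SubnetID: ptr("e9bexamplesubnetid")},
		// 2. Reference by Subnet object name: the resolver copies that
		//    object's external name into subnetId.
		{IPv4: ptr(true), SubnetIDRef: &v1.Reference{Name: "load-test-subnet"}},
		// 3. Label selector: the resolver picks any matching Subnet.
		{IPv4: ptr(true), SubnetIDSelector: &v1.Selector{
			MatchLabels: map[string]string{"purpose": "loadtesting"},
		}},
	}
}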
+ // The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type NetworkInterfaceObservation struct { + // Manually set static IP address. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` -// Manually set static IP address. -IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + // Flag for allocating IPv4 address for the network interface. + IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"` -// Flag for allocating IPv4 address for the network interface. -IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"` + // Flag for allocating IPv6 address for the network interface. + IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"` -// Flag for allocating IPv6 address for the network interface. -IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + // Manually set static IPv6 address. + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` -// Manually set static IPv6 address. -IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` + Index *float64 `json:"index,omitempty" tf:"index,omitempty"` -Index *float64 `json:"index,omitempty" tf:"index,omitempty"` + MacAddress *string `json:"macAddress,omitempty" tf:"mac_address,omitempty"` -MacAddress *string `json:"macAddress,omitempty" tf:"mac_address,omitempty"` + // Flag for using NAT. + NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"` -// Flag for using NAT. -NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"` + // A public address that can be used to access the internet over NAT. + NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"` -// A public address that can be used to access the internet over NAT. -NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"` + NATIPVersion *string `json:"natIpVersion,omitempty" tf:"nat_ip_version,omitempty"` -NATIPVersion *string `json:"natIpVersion,omitempty" tf:"nat_ip_version,omitempty"` + // Security group ids for network interface. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// Security group ids for network interface. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
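Every *Ref and *Selector field here carries tf:"-", while the value fields carry real Terraform attribute names: the refs are Kubernetes-side plumbing only and are excluded when upjet marshals the spec into Terraform configuration. A simplified, self-contained sketch of that tag-driven filtering idea; the real logic lives in github.com/crossplane/upjet's tag-aware JSON parser and is more involved:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type example struct {
	SubnetID    *string `tf:"subnet_id,omitempty"`
	SubnetIDRef *string `tf:"-"` // reference plumbing, never sent to Terraform
}

// tfKeys lists the Terraform attribute names a struct contributes, skipping
// fields tagged tf:"-".
func tfKeys(v any) []string {
	t := reflect.TypeOf(v)
	var keys []string
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("tf")
		if tag == "" || tag == "-" {
			continue
		}
		keys = append(keys, strings.SplitN(tag, ",", 2)[0])
	}
	return keys
}

func main() {
	fmt.Println(tfKeys(example{})) // [subnet_id]
}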
- -// The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` } - type NetworkInterfaceParameters struct { + // Manually set static IP address. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` -// Manually set static IP address. -// +kubebuilder:validation:Optional -IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` - -// Flag for allocating IPv4 address for the network interface. -// +kubebuilder:validation:Optional -IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"` + // Flag for allocating IPv4 address for the network interface. + // +kubebuilder:validation:Optional + IPv4 *bool `json:"ipv4,omitempty" tf:"ipv4,omitempty"` -// Flag for allocating IPv6 address for the network interface. -// +kubebuilder:validation:Optional -IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + // Flag for allocating IPv6 address for the network interface. + // +kubebuilder:validation:Optional + IPv6 *bool `json:"ipv6,omitempty" tf:"ipv6,omitempty"` -// Manually set static IPv6 address. -// +kubebuilder:validation:Optional -IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` + // Manually set static IPv6 address. + // +kubebuilder:validation:Optional + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` -// Flag for using NAT. -// +kubebuilder:validation:Optional -NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"` + // Flag for using NAT. + // +kubebuilder:validation:Optional + NAT *bool `json:"nat,omitempty" tf:"nat,omitempty"` -// A public address that can be used to access the internet over NAT. -// +kubebuilder:validation:Optional -NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"` + // A public address that can be used to access the internet over NAT. + // +kubebuilder:validation:Optional + NATIPAddress *string `json:"natIpAddress,omitempty" tf:"nat_ip_address,omitempty"` -// Security group ids for network interface. -// +kubebuilder:validation:Optional -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // Security group ids for network interface. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet to attach this interface to. The subnet must reside in the same zone where this instance was created. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId.
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` } - type ResourcesInitParameters struct { + // If provided, specifies baseline core performance as a percent. + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` -// If provided, specifies baseline core performance as a percent. -CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` - -// The number of CPU cores for the instance. Defaults to 2 cores. -Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + // The number of CPU cores for the instance. Defaults to 2 cores. + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` -// The memory size in GB. Defaults to 2 GB. -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // The memory size in GB. Defaults to 2 GB. + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` } - type ResourcesObservation struct { + // If provided, specifies baseline core performance as a percent. + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` -// If provided, specifies baseline core performance as a percent. -CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` - -// The number of CPU cores for the instance. Defaults to 2 cores. -Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + // The number of CPU cores for the instance. Defaults to 2 cores. + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` -// The memory size in GB. Defaults to 2 GB. -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // The memory size in GB. Defaults to 2 GB. + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` } - type ResourcesParameters struct { + // If provided, specifies baseline core performance as a percent. + // +kubebuilder:validation:Optional + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` -// If provided, specifies baseline core performance as a percent. -// +kubebuilder:validation:Optional -CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + // The number of CPU cores for the instance. Defaults to 2 cores. + // +kubebuilder:validation:Optional + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` -// The number of CPU cores for the instance. Defaults to 2 cores. -// +kubebuilder:validation:Optional -Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` - -// The memory size in GB. Defaults to 2 GB. -// +kubebuilder:validation:Optional -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // The memory size in GB. Defaults to 2 GB. + // +kubebuilder:validation:Optional + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` } // AgentSpec defines the desired state of Agent type AgentSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider AgentParameters `json:"forProvider"` + ForProvider AgentParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. 
// InitProvider holds the same fields as ForProvider, with the exception @@ -525,20 +484,19 @@ type AgentSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider AgentInitParameters `json:"initProvider,omitempty"` + InitProvider AgentInitParameters `json:"initProvider,omitempty"` } // AgentStatus defines the observed state of Agent. type AgentStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider AgentObservation `json:"atProvider,omitempty"` + AtProvider AgentObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Agent is the Schema for the Agents API. Manages a Yandex Cloud Load Testing Agent resource. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -548,10 +506,10 @@ type AgentStatus struct { type Agent struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.computeInstance) || (has(self.initProvider) && has(self.initProvider.computeInstance))",message="spec.forProvider.computeInstance is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec AgentSpec `json:"spec"` - Status AgentStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.computeInstance) || (has(self.initProvider) && has(self.initProvider.computeInstance))",message="spec.forProvider.computeInstance is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec AgentSpec `json:"spec"` + Status AgentStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/loadtesting/v1alpha1/zz_generated.conversion_hubs.go b/apis/loadtesting/v1alpha1/zz_generated.conversion_hubs.go index e8d9fe5..6387790 100755 --- a/apis/loadtesting/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/loadtesting/v1alpha1/zz_generated.conversion_hubs.go @@ -1,10 +1,6 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 - - // Hub marks this type as a conversion hub. - func (tr *Agent) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *Agent) Hub() {} diff --git a/apis/loadtesting/v1alpha1/zz_generated.deepcopy.go b/apis/loadtesting/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b169244 --- /dev/null +++ b/apis/loadtesting/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1123 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen.
DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Agent) DeepCopyInto(out *Agent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Agent. +func (in *Agent) DeepCopy() *Agent { + if in == nil { + return nil + } + out := new(Agent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Agent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentInitParameters) DeepCopyInto(out *AgentInitParameters) { + *out = *in + if in.ComputeInstance != nil { + in, out := &in.ComputeInstance, &out.ComputeInstance + *out = make([]ComputeInstanceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentInitParameters. +func (in *AgentInitParameters) DeepCopy() *AgentInitParameters { + if in == nil { + return nil + } + out := new(AgentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentList) DeepCopyInto(out *AgentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Agent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentList. +func (in *AgentList) DeepCopy() *AgentList { + if in == nil { + return nil + } + out := new(AgentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AgentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentObservation) DeepCopyInto(out *AgentObservation) { + *out = *in + if in.ComputeInstance != nil { + in, out := &in.ComputeInstance, &out.ComputeInstance + *out = make([]ComputeInstanceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ComputeInstanceID != nil { + in, out := &in.ComputeInstanceID, &out.ComputeInstanceID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentObservation. +func (in *AgentObservation) DeepCopy() *AgentObservation { + if in == nil { + return nil + } + out := new(AgentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentParameters) DeepCopyInto(out *AgentParameters) { + *out = *in + if in.ComputeInstance != nil { + in, out := &in.ComputeInstance, &out.ComputeInstance + *out = make([]ComputeInstanceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentParameters. +func (in *AgentParameters) DeepCopy() *AgentParameters { + if in == nil { + return nil + } + out := new(AgentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
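The map-copy loops repeated through this file are what a correct deep copy of map[string]*string requires: clone the map and also every pointed-to string, so the copy never aliases the original. Hand-written, the pattern collapses to a single helper; a sketch of what controller-gen effectively inlines at each site:

package v1alpha1

// copyStringPtrMap mirrors the generated loops: a fresh map, and a fresh
// *string for every non-nil value, so mutating the copy cannot touch the
// source object.
func copyStringPtrMap(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for k, v := range in {
		if v == nil {
			out[k] = nil
			continue
		}
		s := *v // copy the string value, not the pointer
		out[k] = &s
	}
	return out
}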
+func (in *AgentSpec) DeepCopyInto(out *AgentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentSpec. +func (in *AgentSpec) DeepCopy() *AgentSpec { + if in == nil { + return nil + } + out := new(AgentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentStatus) DeepCopyInto(out *AgentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentStatus. +func (in *AgentStatus) DeepCopy() *AgentStatus { + if in == nil { + return nil + } + out := new(AgentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiskInitParameters) DeepCopyInto(out *BootDiskInitParameters) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.InitializeParams != nil { + in, out := &in.InitializeParams, &out.InitializeParams + *out = make([]InitializeParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskInitParameters. +func (in *BootDiskInitParameters) DeepCopy() *BootDiskInitParameters { + if in == nil { + return nil + } + out := new(BootDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiskObservation) DeepCopyInto(out *BootDiskObservation) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.InitializeParams != nil { + in, out := &in.InitializeParams, &out.InitializeParams + *out = make([]InitializeParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskObservation. +func (in *BootDiskObservation) DeepCopy() *BootDiskObservation { + if in == nil { + return nil + } + out := new(BootDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BootDiskParameters) DeepCopyInto(out *BootDiskParameters) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.InitializeParams != nil { + in, out := &in.InitializeParams, &out.InitializeParams + *out = make([]InitializeParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiskParameters. +func (in *BootDiskParameters) DeepCopy() *BootDiskParameters { + if in == nil { + return nil + } + out := new(BootDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceInitParameters) DeepCopyInto(out *ComputeInstanceInitParameters) { + *out = *in + if in.BootDisk != nil { + in, out := &in.BootDisk, &out.BootDisk + *out = make([]BootDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceInitParameters. +func (in *ComputeInstanceInitParameters) DeepCopy() *ComputeInstanceInitParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeInstanceObservation) DeepCopyInto(out *ComputeInstanceObservation) { + *out = *in + if in.BootDisk != nil { + in, out := &in.BootDisk, &out.BootDisk + *out = make([]BootDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ComputedLabels != nil { + in, out := &in.ComputedLabels, &out.ComputedLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ComputedMetadata != nil { + in, out := &in.ComputedMetadata, &out.ComputedMetadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceObservation. +func (in *ComputeInstanceObservation) DeepCopy() *ComputeInstanceObservation { + if in == nil { + return nil + } + out := new(ComputeInstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeInstanceParameters) DeepCopyInto(out *ComputeInstanceParameters) { + *out = *in + if in.BootDisk != nil { + in, out := &in.BootDisk, &out.BootDisk + *out = make([]BootDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceParameters. +func (in *ComputeInstanceParameters) DeepCopy() *ComputeInstanceParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializeParamsInitParameters) DeepCopyInto(out *InitializeParamsInitParameters) { + *out = *in + if in.BlockSize != nil { + in, out := &in.BlockSize, &out.BlockSize + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializeParamsInitParameters. +func (in *InitializeParamsInitParameters) DeepCopy() *InitializeParamsInitParameters { + if in == nil { + return nil + } + out := new(InitializeParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitializeParamsObservation) DeepCopyInto(out *InitializeParamsObservation) { + *out = *in + if in.BlockSize != nil { + in, out := &in.BlockSize, &out.BlockSize + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializeParamsObservation. +func (in *InitializeParamsObservation) DeepCopy() *InitializeParamsObservation { + if in == nil { + return nil + } + out := new(InitializeParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializeParamsParameters) DeepCopyInto(out *InitializeParamsParameters) { + *out = *in + if in.BlockSize != nil { + in, out := &in.BlockSize, &out.BlockSize + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializeParamsParameters. +func (in *InitializeParamsParameters) DeepCopy() *InitializeParamsParameters { + if in == nil { + return nil + } + out := new(InitializeParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceInitParameters) DeepCopyInto(out *NetworkInterfaceInitParameters) { + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.NATIPAddress != nil { + in, out := &in.NATIPAddress, &out.NATIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceInitParameters. +func (in *NetworkInterfaceInitParameters) DeepCopy() *NetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceObservation) DeepCopyInto(out *NetworkInterfaceObservation) { + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(float64) + **out = **in + } + if in.MacAddress != nil { + in, out := &in.MacAddress, &out.MacAddress + *out = new(string) + **out = **in + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.NATIPAddress != nil { + in, out := &in.NATIPAddress, &out.NATIPAddress + *out = new(string) + **out = **in + } + if in.NATIPVersion != nil { + in, out := &in.NATIPVersion, &out.NATIPVersion + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceObservation. 
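Together with DeepCopyObject, these methods let Agent satisfy runtime.Object, which is what controller-runtime informers and caches require; objects read from a cache are shared, so callers copy before mutating. A hedged usage sketch; the function and the description text are illustrative, not part of the generated API:

package v1alpha1

// withDescription returns a mutated copy of a cached Agent; the cache's
// original object is left untouched.
func withDescription(cached *Agent, desc string) *Agent {
	desired := cached.DeepCopy()
	desired.Spec.ForProvider.Description = &desc
	return desired
}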
+func (in *NetworkInterfaceObservation) DeepCopy() *NetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceParameters) DeepCopyInto(out *NetworkInterfaceParameters) { + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(bool) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.NAT != nil { + in, out := &in.NAT, &out.NAT + *out = new(bool) + **out = **in + } + if in.NATIPAddress != nil { + in, out := &in.NATIPAddress, &out.NATIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceParameters. +func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. 
+func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/loadtesting/v1alpha1/zz_generated.resolvers.go b/apis/loadtesting/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..c850bc8 --- /dev/null +++ b/apis/loadtesting/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,130 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Agent. +func (mg *Agent) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ComputeInstance); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha1.SubnetList{}, + Managed: &v1alpha1.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetID") + } + mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.ComputeInstance); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ComputeInstance[i3].ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ComputeInstance[i3].ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ComputeInstance[i3].ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha1.SecurityGroupList{}, + Managed: &v1alpha1.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ComputeInstance[i3].ServiceAccountID") + } + mg.Spec.ForProvider.ComputeInstance[i3].ServiceAccountID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ComputeInstance[i3].ServiceAccountIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha11.FolderList{}, + Managed: &v1alpha11.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.ComputeInstance); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha1.SubnetList{}, + Managed: &v1alpha1.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetID") + } + mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ComputeInstance[i3].NetworkInterface[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.ComputeInstance); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ComputeInstance[i3].ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ComputeInstance[i3].ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ComputeInstance[i3].ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha1.SecurityGroupList{}, + Managed: &v1alpha1.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ComputeInstance[i3].ServiceAccountID") + } + mg.Spec.InitProvider.ComputeInstance[i3].ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ComputeInstance[i3].ServiceAccountIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha11.FolderList{}, + Managed: &v1alpha11.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/loadtesting/v1alpha1/zz_groupversion_info.go b/apis/loadtesting/v1alpha1/zz_groupversion_info.go index a81341a..9b07d96 100755 --- a/apis/loadtesting/v1alpha1/zz_groupversion_info.go +++ b/apis/loadtesting/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
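ResolveReferences above is what turns the Ref/Selector fields into concrete IDs: the managed reconciler calls it before each observe, walking every computeInstance and networkInterface index, resolving against the cluster, and writing the external names back into spec. A minimal sketch of driving it directly, e.g. from a test; the folder name is illustrative and the context and reader are assumed to be supplied by the caller:

package v1alpha1

import (
	"context"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resolveFolderByRef points the agent at a Folder object by name and lets the
// generated resolver replace the ref with that object's external name.
func resolveFolderByRef(ctx context.Context, c client.Reader, agent *Agent) error {
	agent.Spec.ForProvider.FolderIDRef = &v1.Reference{Name: "my-folder"} // illustrative
	if err := agent.ResolveReferences(ctx, c); err != nil {
		return err // unresolved refs come back wrapped with the field path
	}
	// agent.Spec.ForProvider.FolderID now holds the resolved external name.
	return nil
}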
// +kubebuilder:object:generate=true diff --git a/apis/lockbox/v1alpha1/zz_generated.conversion_hubs.go b/apis/lockbox/v1alpha1/zz_generated.conversion_hubs.go index 1b15efb..758455a 100755 --- a/apis/lockbox/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/lockbox/v1alpha1/zz_generated.conversion_hubs.go @@ -1,16 +1,12 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 +// Hub marks this type as a conversion hub. +func (tr *Secret) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *Secret) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *SecretIAMBinding) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *SecretVersion) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *SecretIAMBinding) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *SecretVersion) Hub() {} diff --git a/apis/lockbox/v1alpha1/zz_generated.deepcopy.go b/apis/lockbox/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..c4de331 --- /dev/null +++ b/apis/lockbox/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1226 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandInitParameters) DeepCopyInto(out *CommandInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandInitParameters. +func (in *CommandInitParameters) DeepCopy() *CommandInitParameters { + if in == nil { + return nil + } + out := new(CommandInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandObservation) DeepCopyInto(out *CommandObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandObservation. 
+func (in *CommandObservation) DeepCopy() *CommandObservation { + if in == nil { + return nil + } + out := new(CommandObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandParameters) DeepCopyInto(out *CommandParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandParameters. +func (in *CommandParameters) DeepCopy() *CommandParameters { + if in == nil { + return nil + } + out := new(CommandParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EntriesInitParameters) DeepCopyInto(out *EntriesInitParameters) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]CommandInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TextValueSecretRef != nil { + in, out := &in.TextValueSecretRef, &out.TextValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntriesInitParameters. +func (in *EntriesInitParameters) DeepCopy() *EntriesInitParameters { + if in == nil { + return nil + } + out := new(EntriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EntriesObservation) DeepCopyInto(out *EntriesObservation) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]CommandObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntriesObservation. +func (in *EntriesObservation) DeepCopy() *EntriesObservation { + if in == nil { + return nil + } + out := new(EntriesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EntriesParameters) DeepCopyInto(out *EntriesParameters) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]CommandParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TextValueSecretRef != nil { + in, out := &in.TextValueSecretRef, &out.TextValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntriesParameters. +func (in *EntriesParameters) DeepCopy() *EntriesParameters { + if in == nil { + return nil + } + out := new(EntriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordPayloadSpecificationInitParameters) DeepCopyInto(out *PasswordPayloadSpecificationInitParameters) { + *out = *in + if in.ExcludedPunctuation != nil { + in, out := &in.ExcludedPunctuation, &out.ExcludedPunctuation + *out = new(string) + **out = **in + } + if in.IncludeDigits != nil { + in, out := &in.IncludeDigits, &out.IncludeDigits + *out = new(bool) + **out = **in + } + if in.IncludeLowercase != nil { + in, out := &in.IncludeLowercase, &out.IncludeLowercase + *out = new(bool) + **out = **in + } + if in.IncludePunctuation != nil { + in, out := &in.IncludePunctuation, &out.IncludePunctuation + *out = new(bool) + **out = **in + } + if in.IncludeUppercase != nil { + in, out := &in.IncludeUppercase, &out.IncludeUppercase + *out = new(bool) + **out = **in + } + if in.IncludedPunctuation != nil { + in, out := &in.IncludedPunctuation, &out.IncludedPunctuation + *out = new(string) + **out = **in + } + if in.Length != nil { + in, out := &in.Length, &out.Length + *out = new(float64) + **out = **in + } + if in.PasswordKey != nil { + in, out := &in.PasswordKey, &out.PasswordKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordPayloadSpecificationInitParameters. +func (in *PasswordPayloadSpecificationInitParameters) DeepCopy() *PasswordPayloadSpecificationInitParameters { + if in == nil { + return nil + } + out := new(PasswordPayloadSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PasswordPayloadSpecificationObservation) DeepCopyInto(out *PasswordPayloadSpecificationObservation) { + *out = *in + if in.ExcludedPunctuation != nil { + in, out := &in.ExcludedPunctuation, &out.ExcludedPunctuation + *out = new(string) + **out = **in + } + if in.IncludeDigits != nil { + in, out := &in.IncludeDigits, &out.IncludeDigits + *out = new(bool) + **out = **in + } + if in.IncludeLowercase != nil { + in, out := &in.IncludeLowercase, &out.IncludeLowercase + *out = new(bool) + **out = **in + } + if in.IncludePunctuation != nil { + in, out := &in.IncludePunctuation, &out.IncludePunctuation + *out = new(bool) + **out = **in + } + if in.IncludeUppercase != nil { + in, out := &in.IncludeUppercase, &out.IncludeUppercase + *out = new(bool) + **out = **in + } + if in.IncludedPunctuation != nil { + in, out := &in.IncludedPunctuation, &out.IncludedPunctuation + *out = new(string) + **out = **in + } + if in.Length != nil { + in, out := &in.Length, &out.Length + *out = new(float64) + **out = **in + } + if in.PasswordKey != nil { + in, out := &in.PasswordKey, &out.PasswordKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordPayloadSpecificationObservation. +func (in *PasswordPayloadSpecificationObservation) DeepCopy() *PasswordPayloadSpecificationObservation { + if in == nil { + return nil + } + out := new(PasswordPayloadSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordPayloadSpecificationParameters) DeepCopyInto(out *PasswordPayloadSpecificationParameters) { + *out = *in + if in.ExcludedPunctuation != nil { + in, out := &in.ExcludedPunctuation, &out.ExcludedPunctuation + *out = new(string) + **out = **in + } + if in.IncludeDigits != nil { + in, out := &in.IncludeDigits, &out.IncludeDigits + *out = new(bool) + **out = **in + } + if in.IncludeLowercase != nil { + in, out := &in.IncludeLowercase, &out.IncludeLowercase + *out = new(bool) + **out = **in + } + if in.IncludePunctuation != nil { + in, out := &in.IncludePunctuation, &out.IncludePunctuation + *out = new(bool) + **out = **in + } + if in.IncludeUppercase != nil { + in, out := &in.IncludeUppercase, &out.IncludeUppercase + *out = new(bool) + **out = **in + } + if in.IncludedPunctuation != nil { + in, out := &in.IncludedPunctuation, &out.IncludedPunctuation + *out = new(string) + **out = **in + } + if in.Length != nil { + in, out := &in.Length, &out.Length + *out = new(float64) + **out = **in + } + if in.PasswordKey != nil { + in, out := &in.PasswordKey, &out.PasswordKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordPayloadSpecificationParameters. +func (in *PasswordPayloadSpecificationParameters) DeepCopy() *PasswordPayloadSpecificationParameters { + if in == nil { + return nil + } + out := new(PasswordPayloadSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. 
+func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIAMBinding) DeepCopyInto(out *SecretIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBinding. +func (in *SecretIAMBinding) DeepCopy() *SecretIAMBinding { + if in == nil { + return nil + } + out := new(SecretIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIAMBindingInitParameters) DeepCopyInto(out *SecretIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SecretIDRef != nil { + in, out := &in.SecretIDRef, &out.SecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretIDSelector != nil { + in, out := &in.SecretIDSelector, &out.SecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBindingInitParameters. +func (in *SecretIAMBindingInitParameters) DeepCopy() *SecretIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(SecretIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIAMBindingList) DeepCopyInto(out *SecretIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBindingList. 
+func (in *SecretIAMBindingList) DeepCopy() *SecretIAMBindingList { + if in == nil { + return nil + } + out := new(SecretIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIAMBindingObservation) DeepCopyInto(out *SecretIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBindingObservation. +func (in *SecretIAMBindingObservation) DeepCopy() *SecretIAMBindingObservation { + if in == nil { + return nil + } + out := new(SecretIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIAMBindingParameters) DeepCopyInto(out *SecretIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SecretIDRef != nil { + in, out := &in.SecretIDRef, &out.SecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretIDSelector != nil { + in, out := &in.SecretIDSelector, &out.SecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBindingParameters. +func (in *SecretIAMBindingParameters) DeepCopy() *SecretIAMBindingParameters { + if in == nil { + return nil + } + out := new(SecretIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretIAMBindingSpec) DeepCopyInto(out *SecretIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBindingSpec. +func (in *SecretIAMBindingSpec) DeepCopy() *SecretIAMBindingSpec { + if in == nil { + return nil + } + out := new(SecretIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIAMBindingStatus) DeepCopyInto(out *SecretIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIAMBindingStatus. +func (in *SecretIAMBindingStatus) DeepCopy() *SecretIAMBindingStatus { + if in == nil { + return nil + } + out := new(SecretIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretInitParameters) DeepCopyInto(out *SecretInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordPayloadSpecification != nil { + in, out := &in.PasswordPayloadSpecification, &out.PasswordPayloadSpecification + *out = make([]PasswordPayloadSpecificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInitParameters. +func (in *SecretInitParameters) DeepCopy() *SecretInitParameters { + if in == nil { + return nil + } + out := new(SecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretObservation) DeepCopyInto(out *SecretObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordPayloadSpecification != nil { + in, out := &in.PasswordPayloadSpecification, &out.PasswordPayloadSpecification + *out = make([]PasswordPayloadSpecificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretObservation. +func (in *SecretObservation) DeepCopy() *SecretObservation { + if in == nil { + return nil + } + out := new(SecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretParameters) DeepCopyInto(out *SecretParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordPayloadSpecification != nil { + in, out := &in.PasswordPayloadSpecification, &out.PasswordPayloadSpecification + *out = make([]PasswordPayloadSpecificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretParameters. +func (in *SecretParameters) DeepCopy() *SecretParameters { + if in == nil { + return nil + } + out := new(SecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretSpec) DeepCopyInto(out *SecretSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec. +func (in *SecretSpec) DeepCopy() *SecretSpec { + if in == nil { + return nil + } + out := new(SecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretStatus) DeepCopyInto(out *SecretStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStatus. +func (in *SecretStatus) DeepCopy() *SecretStatus { + if in == nil { + return nil + } + out := new(SecretStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretVersion) DeepCopyInto(out *SecretVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersion. +func (in *SecretVersion) DeepCopy() *SecretVersion { + if in == nil { + return nil + } + out := new(SecretVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVersionInitParameters) DeepCopyInto(out *SecretVersionInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entries != nil { + in, out := &in.Entries, &out.Entries + *out = make([]EntriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SecretIDRef != nil { + in, out := &in.SecretIDRef, &out.SecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretIDSelector != nil { + in, out := &in.SecretIDSelector, &out.SecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersionInitParameters. +func (in *SecretVersionInitParameters) DeepCopy() *SecretVersionInitParameters { + if in == nil { + return nil + } + out := new(SecretVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVersionList) DeepCopyInto(out *SecretVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersionList. +func (in *SecretVersionList) DeepCopy() *SecretVersionList { + if in == nil { + return nil + } + out := new(SecretVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretVersionObservation) DeepCopyInto(out *SecretVersionObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entries != nil { + in, out := &in.Entries, &out.Entries + *out = make([]EntriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersionObservation. +func (in *SecretVersionObservation) DeepCopy() *SecretVersionObservation { + if in == nil { + return nil + } + out := new(SecretVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVersionParameters) DeepCopyInto(out *SecretVersionParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entries != nil { + in, out := &in.Entries, &out.Entries + *out = make([]EntriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SecretIDRef != nil { + in, out := &in.SecretIDRef, &out.SecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretIDSelector != nil { + in, out := &in.SecretIDSelector, &out.SecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersionParameters. +func (in *SecretVersionParameters) DeepCopy() *SecretVersionParameters { + if in == nil { + return nil + } + out := new(SecretVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVersionSpec) DeepCopyInto(out *SecretVersionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersionSpec. +func (in *SecretVersionSpec) DeepCopy() *SecretVersionSpec { + if in == nil { + return nil + } + out := new(SecretVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVersionStatus) DeepCopyInto(out *SecretVersionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersionStatus. 
+func (in *SecretVersionStatus) DeepCopy() *SecretVersionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretVersionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/apis/lockbox/v1alpha1/zz_generated.resolvers.go b/apis/lockbox/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..96faadc
--- /dev/null
+++ b/apis/lockbox/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,205 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	errors "github.com/pkg/errors"
+	v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1"
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
+	iam "github.com/tagesjump/provider-upjet-yc/config/iam"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this Secret.
+func (mg *Secret) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.KMSKeyIDRef,
+		Selector:     mg.Spec.ForProvider.KMSKeyIDSelector,
+		To: reference.To{
+			List:    &v1alpha11.SymmetricKeyList{},
+			Managed: &v1alpha11.SymmetricKey{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID")
+	}
+	mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.KMSKeyIDRef,
+		Selector:     mg.Spec.InitProvider.KMSKeyIDSelector,
+		To: reference.To{
+			List:    &v1alpha11.SymmetricKeyList{},
+			Managed: &v1alpha11.SymmetricKey{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID")
+	}
+	mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference
+
+	return nil
+}
+
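The SecretIAMBinding resolver that follows differs from the single-value `Resolve` calls above: `members` is a list populated from referenced ServiceAccounts, so angryjet emits `ResolveMultiple` with the provider's `iam.ServiceAccountRefValue()` extractor. A hedged sketch of what such an extractor plausibly looks like (assumption: the real implementation lives in `config/iam` and may differ in detail):

```go
package sketch

import (
	"github.com/crossplane/crossplane-runtime/pkg/meta"
	"github.com/crossplane/crossplane-runtime/pkg/reference"
	"github.com/crossplane/crossplane-runtime/pkg/resource"
)

// serviceAccountRefValue maps a referenced ServiceAccount object to the IAM
// member string Yandex Cloud bindings expect, "serviceAccount:<id>", using
// the object's external-name annotation as the ID.
func serviceAccountRefValue() reference.ExtractValueFn {
	return func(mg resource.Managed) string {
		return "serviceAccount:" + meta.GetExternalName(mg)
	}
}
```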
+// ResolveReferences of this SecretIAMBinding.
+func (mg *SecretIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var mrsp reference.MultiResolutionResponse
+	var err error
+
+	mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+		CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members),
+		Extract:       iam.ServiceAccountRefValue(),
+		References:    mg.Spec.ForProvider.ServiceAccountRef,
+		Selector:      mg.Spec.ForProvider.ServiceAccountSelector,
+		To: reference.To{
+			List:    &v1alpha12.ServiceAccountList{},
+			Managed: &v1alpha12.ServiceAccount{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.Members")
+	}
+	mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues)
+	mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecretID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.SecretIDRef,
+		Selector:     mg.Spec.ForProvider.SecretIDSelector,
+		To: reference.To{
+			List:    &SecretList{},
+			Managed: &Secret{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.SecretID")
+	}
+	mg.Spec.ForProvider.SecretID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.SecretIDRef = rsp.ResolvedReference
+
+	mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
+		CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members),
+		Extract:       iam.ServiceAccountRefValue(),
+		References:    mg.Spec.InitProvider.ServiceAccountRef,
+		Selector:      mg.Spec.InitProvider.ServiceAccountSelector,
+		To: reference.To{
+			List:    &v1alpha12.ServiceAccountList{},
+			Managed: &v1alpha12.ServiceAccount{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.Members")
+	}
+	mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues)
+	mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecretID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.SecretIDRef,
+		Selector:     mg.Spec.InitProvider.SecretIDSelector,
+		To: reference.To{
+			List:    &SecretList{},
+			Managed: &Secret{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.SecretID")
+	}
+	mg.Spec.InitProvider.SecretID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.SecretIDRef = rsp.ResolvedReference
+
+	return nil
+}
+
+// ResolveReferences of this SecretVersion.
+func (mg *SecretVersion) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecretID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SecretIDRef, + Selector: mg.Spec.ForProvider.SecretIDSelector, + To: reference.To{ + List: &SecretList{}, + Managed: &Secret{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecretID") + } + mg.Spec.ForProvider.SecretID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SecretIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecretID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SecretIDRef, + Selector: mg.Spec.InitProvider.SecretIDSelector, + To: reference.To{ + List: &SecretList{}, + Managed: &Secret{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecretID") + } + mg.Spec.InitProvider.SecretID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SecretIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/lockbox/v1alpha1/zz_groupversion_info.go b/apis/lockbox/v1alpha1/zz_groupversion_info.go index 9695cbe..0e24952 100755 --- a/apis/lockbox/v1alpha1/zz_groupversion_info.go +++ b/apis/lockbox/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/lockbox/v1alpha1/zz_secret_terraformed.go b/apis/lockbox/v1alpha1/zz_secret_terraformed.go index d2ea5a5..d5f25f7 100755 --- a/apis/lockbox/v1alpha1/zz_secret_terraformed.go +++ b/apis/lockbox/v1alpha1/zz_secret_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
 package v1alpha1
 
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this Secret
 func (mg *Secret) GetTerraformResourceType() string {
-return "yandex_lockbox_secret"
+	return "yandex_lockbox_secret"
 }
 
 // GetConnectionDetailsMapping for this Secret
 func (tr *Secret) GetConnectionDetailsMapping() map[string]string {
-return nil
+	return nil
 }
 
 // GetObservation of this Secret
 func (tr *Secret) GetObservation() (map[string]any, error) {
-o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-if err != nil {
-return nil, err
-}
-base := map[string]any{}
-return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this Secret
 func (tr *Secret) SetObservation(obs map[string]any) error {
-p, err := json.TFParser.Marshal(obs)
-if err != nil {
-return err
-}
-return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this Secret
 func (tr *Secret) GetID() string {
-if tr.Status.AtProvider.ID == nil {
-return ""
-}
-return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this Secret
 func (tr *Secret) GetParameters() (map[string]any, error) {
-p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-if err != nil {
-return nil, err
-}
-base := map[string]any{}
-return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this Secret
 func (tr *Secret) SetParameters(params map[string]any) error {
-p, err := json.TFParser.Marshal(params)
-if err != nil {
-return err
-}
-return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this Secret
 func (tr *Secret) GetInitParameters() (map[string]any, error) {
-p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-if err != nil {
-return nil, err
-}
-base := map[string]any{}
-return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetMergedParameters of this Secret
 func (tr *Secret) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-params, err := tr.GetParameters()
-if err != nil {
-return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-}
-if !shouldMergeInitProvider {
-return params, nil
-}
-
-initParams, err := tr.GetInitParameters()
-if err != nil {
-return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-}
-
-// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-// slices from the initProvider to forProvider. As it also sets
-// overwrite to true, we need to set it back to false, we don't
-// want to overwrite the forProvider fields with the initProvider
-// fields.
-err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-c.Overwrite = false
-})
-if err != nil {
-return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-}
-
-return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this Secret using its observed tfState.
 // returns true if there are any spec changes for the resource.
 func (tr *Secret) LateInitialize(attrs []byte) (bool, error) {
-params := &SecretParameters{}
-if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-}
-opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-li := resource.NewGenericLateInitializer(opts...)
-return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &SecretParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *Secret) GetTerraformSchemaVersion() int {
-return 1
+	return 1
 }
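The Note(lsviben) comment above is easy to misread, so here is a standalone sketch of the merge semantics GetMergedParameters relies on (not repo code; the module path may be github.com/imdario/mergo on older versions): forProvider values win, and initProvider only fills fields that are still empty.

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	forProvider := map[string]any{"name": "from-forProvider"}
	initProvider := map[string]any{"name": "from-initProvider", "description": "only-in-init"}

	// WithSliceDeepCopy deep-copies slice elements but also flips
	// Overwrite to true as a side effect, so it is reset to false here:
	// existing forProvider values must not be clobbered by initProvider.
	if err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	fmt.Println(forProvider["name"])        // from-forProvider (kept)
	fmt.Println(forProvider["description"]) // only-in-init (filled in)
}
```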
diff --git a/apis/lockbox/v1alpha1/zz_secret_types.go b/apis/lockbox/v1alpha1/zz_secret_types.go
index 8ef1b73..c895002 100755
--- a/apis/lockbox/v1alpha1/zz_secret_types.go
+++ b/apis/lockbox/v1alpha1/zz_secret_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
@@ -9,242 +7,227 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
 
-
-
 type PasswordPayloadSpecificationInitParameters struct {
+	// String of punctuation characters to exclude from the default. Requires include_punctuation = true. Default is empty.
+	ExcludedPunctuation *string `json:"excludedPunctuation,omitempty" tf:"excluded_punctuation,omitempty"`
 
-// String of punctuation characters to exclude from the default. Requires include_punctuation = true. Default is empty.
-ExcludedPunctuation *string `json:"excludedPunctuation,omitempty" tf:"excluded_punctuation,omitempty"`
-
-// Use digits in the generated password. Default is true.
-IncludeDigits *bool `json:"includeDigits,omitempty" tf:"include_digits,omitempty"`
+	// Use digits in the generated password. Default is true.
+	IncludeDigits *bool `json:"includeDigits,omitempty" tf:"include_digits,omitempty"`
 
-// Use lowercase letters in the generated password. Default is true.
-IncludeLowercase *bool `json:"includeLowercase,omitempty" tf:"include_lowercase,omitempty"`
+	// Use lowercase letters in the generated password. Default is true.
+	IncludeLowercase *bool `json:"includeLowercase,omitempty" tf:"include_lowercase,omitempty"`
 
-// Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) in the generated password. Default is true.
-IncludePunctuation *bool `json:"includePunctuation,omitempty" tf:"include_punctuation,omitempty"`
+	// Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) in the generated password. Default is true.
+	IncludePunctuation *bool `json:"includePunctuation,omitempty" tf:"include_punctuation,omitempty"`
 
-// Use capital letters in the generated password. Default is true.
-IncludeUppercase *bool `json:"includeUppercase,omitempty" tf:"include_uppercase,omitempty"`
+	// Use capital letters in the generated password. Default is true.
+	IncludeUppercase *bool `json:"includeUppercase,omitempty" tf:"include_uppercase,omitempty"`
 
-// String of specific punctuation characters to use. Requires include_punctuation = true. Default is empty.
-IncludedPunctuation *string `json:"includedPunctuation,omitempty" tf:"included_punctuation,omitempty"`
+	// String of specific punctuation characters to use. Requires include_punctuation = true. Default is empty.
+	IncludedPunctuation *string `json:"includedPunctuation,omitempty" tf:"included_punctuation,omitempty"`
 
-// Length of generated password. Default is 36.
-Length *float64 `json:"length,omitempty" tf:"length,omitempty"`
+	// Length of generated password. Default is 36.
+	Length *float64 `json:"length,omitempty" tf:"length,omitempty"`
 
-// The key with which the generated password will be placed in the secret version.
-PasswordKey *string `json:"passwordKey,omitempty" tf:"password_key,omitempty"`
+	// The key with which the generated password will be placed in the secret version.
+	PasswordKey *string `json:"passwordKey,omitempty" tf:"password_key,omitempty"`
 }
 
-
 type PasswordPayloadSpecificationObservation struct {
+	// String of punctuation characters to exclude from the default. Requires include_punctuation = true. Default is empty.
+	ExcludedPunctuation *string `json:"excludedPunctuation,omitempty" tf:"excluded_punctuation,omitempty"`
 
-// String of punctuation characters to exclude from the default. Requires include_punctuation = true. Default is empty.
-ExcludedPunctuation *string `json:"excludedPunctuation,omitempty" tf:"excluded_punctuation,omitempty"`
-
-// Use digits in the generated password. Default is true.
-IncludeDigits *bool `json:"includeDigits,omitempty" tf:"include_digits,omitempty"`
+	// Use digits in the generated password. Default is true.
+	IncludeDigits *bool `json:"includeDigits,omitempty" tf:"include_digits,omitempty"`
 
-// Use lowercase letters in the generated password. Default is true.
-IncludeLowercase *bool `json:"includeLowercase,omitempty" tf:"include_lowercase,omitempty"`
+	// Use lowercase letters in the generated password. Default is true.
+	IncludeLowercase *bool `json:"includeLowercase,omitempty" tf:"include_lowercase,omitempty"`
 
-// Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) in the generated password. Default is true.
-IncludePunctuation *bool `json:"includePunctuation,omitempty" tf:"include_punctuation,omitempty"`
+	// Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) in the generated password. Default is true.
+	IncludePunctuation *bool `json:"includePunctuation,omitempty" tf:"include_punctuation,omitempty"`
 
-// Use capital letters in the generated password. Default is true.
-IncludeUppercase *bool `json:"includeUppercase,omitempty" tf:"include_uppercase,omitempty"`
+	// Use capital letters in the generated password. Default is true.
+	IncludeUppercase *bool `json:"includeUppercase,omitempty" tf:"include_uppercase,omitempty"`
 
-// String of specific punctuation characters to use. Requires include_punctuation = true. Default is empty.
-IncludedPunctuation *string `json:"includedPunctuation,omitempty" tf:"included_punctuation,omitempty"`
+	// String of specific punctuation characters to use. Requires include_punctuation = true. Default is empty.
+	IncludedPunctuation *string `json:"includedPunctuation,omitempty" tf:"included_punctuation,omitempty"`
 
-// Length of generated password. Default is 36.
-Length *float64 `json:"length,omitempty" tf:"length,omitempty"`
+	// Length of generated password. Default is 36.
+	Length *float64 `json:"length,omitempty" tf:"length,omitempty"`
 
-// The key with which the generated password will be placed in the secret version.
-PasswordKey *string `json:"passwordKey,omitempty" tf:"password_key,omitempty"`
+	// The key with which the generated password will be placed in the secret version.
+	PasswordKey *string `json:"passwordKey,omitempty" tf:"password_key,omitempty"`
 }
 
-
 type PasswordPayloadSpecificationParameters struct {
+	// String of punctuation characters to exclude from the default. Requires include_punctuation = true. Default is empty.
+	// +kubebuilder:validation:Optional
+	ExcludedPunctuation *string `json:"excludedPunctuation,omitempty" tf:"excluded_punctuation,omitempty"`
 
-// String of punctuation characters to exclude from the default. Requires include_punctuation = true. Default is empty.
-// +kubebuilder:validation:Optional
-ExcludedPunctuation *string `json:"excludedPunctuation,omitempty" tf:"excluded_punctuation,omitempty"`
-
-// Use digits in the generated password. Default is true.
-// +kubebuilder:validation:Optional
-IncludeDigits *bool `json:"includeDigits,omitempty" tf:"include_digits,omitempty"`
+	// Use digits in the generated password. Default is true.
+	// +kubebuilder:validation:Optional
+	IncludeDigits *bool `json:"includeDigits,omitempty" tf:"include_digits,omitempty"`
 
-// Use lowercase letters in the generated password. Default is true.
-// +kubebuilder:validation:Optional
-IncludeLowercase *bool `json:"includeLowercase,omitempty" tf:"include_lowercase,omitempty"`
+	// Use lowercase letters in the generated password. Default is true.
+	// +kubebuilder:validation:Optional
+	IncludeLowercase *bool `json:"includeLowercase,omitempty" tf:"include_lowercase,omitempty"`
 
-// Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) in the generated password. Default is true.
-// +kubebuilder:validation:Optional
-IncludePunctuation *bool `json:"includePunctuation,omitempty" tf:"include_punctuation,omitempty"`
+	// Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) in the generated password. Default is true.
+	// +kubebuilder:validation:Optional
+	IncludePunctuation *bool `json:"includePunctuation,omitempty" tf:"include_punctuation,omitempty"`
 
-// Use capital letters in the generated password. Default is true.
-// +kubebuilder:validation:Optional
-IncludeUppercase *bool `json:"includeUppercase,omitempty" tf:"include_uppercase,omitempty"`
+	// Use capital letters in the generated password. Default is true.
+	// +kubebuilder:validation:Optional
+	IncludeUppercase *bool `json:"includeUppercase,omitempty" tf:"include_uppercase,omitempty"`
 
-// String of specific punctuation characters to use. Requires include_punctuation = true. Default is empty.
-// +kubebuilder:validation:Optional
-IncludedPunctuation *string `json:"includedPunctuation,omitempty" tf:"included_punctuation,omitempty"`
+	// String of specific punctuation characters to use. Requires include_punctuation = true. Default is empty.
+	// +kubebuilder:validation:Optional
+	IncludedPunctuation *string `json:"includedPunctuation,omitempty" tf:"included_punctuation,omitempty"`
 
-// Length of generated password. Default is 36.
-// +kubebuilder:validation:Optional
-Length *float64 `json:"length,omitempty" tf:"length,omitempty"`
+	// Length of generated password. Default is 36.
+	// +kubebuilder:validation:Optional
+	Length *float64 `json:"length,omitempty" tf:"length,omitempty"`
 
-// The key with which the generated password will be placed in the secret version.
-// +kubebuilder:validation:Optional
-PasswordKey *string `json:"passwordKey" tf:"password_key,omitempty"`
+	// The key with which the generated password will be placed in the secret version.
+	// +kubebuilder:validation:Optional
+	PasswordKey *string `json:"passwordKey" tf:"password_key,omitempty"`
 }
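Reviewer aside (not part of the diff): the password-payload fields above are a set of generation knobs that combine into one specification. A minimal standalone sketch of how they fit together, using a trimmed local copy of the generated struct; the helper constructors and the concrete values are made up for illustration and are not provider code.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirror of the generated PasswordPayloadSpecificationParameters,
// reduced to the fields used below; names and tags follow the diff above.
type PasswordPayloadSpecificationParameters struct {
	IncludePunctuation  *bool    `json:"includePunctuation,omitempty"`
	IncludedPunctuation *string  `json:"includedPunctuation,omitempty"`
	Length              *float64 `json:"length,omitempty"`
	PasswordKey         *string  `json:"passwordKey"`
}

func boolPtr(b bool) *bool          { return &b }
func strPtr(s string) *string       { return &s }
func float64Ptr(f float64) *float64 { return &f }

func main() {
	// Ask Lockbox to generate a 24-character password under the key
	// "password", restricted to a specific punctuation set.
	spec := PasswordPayloadSpecificationParameters{
		IncludePunctuation:  boolPtr(true),
		IncludedPunctuation: strPtr("-_@"),
		Length:              float64Ptr(24),
		PasswordKey:         strPtr("password"),
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```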
 
-
 type SecretInitParameters struct {
+	// Whether the Yandex Cloud Lockbox secret is protected from deletion.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
 
-// Whether the Yandex Cloud Lockbox secret is protected from deletion.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// A description for the Yandex Cloud Lockbox secret.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// A description for the Yandex Cloud Lockbox secret.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// ID of the folder that the Yandex Cloud Lockbox secret belongs to. It will be deduced from provider configuration if not set explicitly.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// ID of the folder that the Yandex Cloud Lockbox secret belongs to. It will be deduced from provider configuration if not set explicitly.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
 
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
 
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// The KMS key used to encrypt the Yandex Cloud Lockbox secret.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey
+	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
 
-// The KMS key used to encrypt the Yandex Cloud Lockbox secret.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey
-KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
+	// Reference to a SymmetricKey in kms to populate kmsKeyId.
+	// +kubebuilder:validation:Optional
+	KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`
 
-// Reference to a SymmetricKey in kms to populate kmsKeyId.
-// +kubebuilder:validation:Optional
-KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`
+	// Selector for a SymmetricKey in kms to populate kmsKeyId.
+	// +kubebuilder:validation:Optional
+	KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`
 
-// Selector for a SymmetricKey in kms to populate kmsKeyId.
-// +kubebuilder:validation:Optional
-KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`
+	// A set of key/value label pairs to assign to the Yandex Cloud Lockbox secret.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the Yandex Cloud Lockbox secret.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Name for the Yandex Cloud Lockbox secret.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Name for the Yandex Cloud Lockbox secret.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Payload specification for password generation.
-PasswordPayloadSpecification []PasswordPayloadSpecificationInitParameters `json:"passwordPayloadSpecification,omitempty" tf:"password_payload_specification,omitempty"`
+	// Payload specification for password generation.
+	PasswordPayloadSpecification []PasswordPayloadSpecificationInitParameters `json:"passwordPayloadSpecification,omitempty" tf:"password_payload_specification,omitempty"`
 }
 
-
 type SecretObservation struct {
+	// The Yandex Cloud Lockbox secret creation timestamp.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
 
-// The Yandex Cloud Lockbox secret creation timestamp.
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+	// Whether the Yandex Cloud Lockbox secret is protected from deletion.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
 
-// Whether the Yandex Cloud Lockbox secret is protected from deletion.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// A description for the Yandex Cloud Lockbox secret.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// A description for the Yandex Cloud Lockbox secret.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// ID of the folder that the Yandex Cloud Lockbox secret belongs to. It will be deduced from provider configuration if not set explicitly.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// ID of the folder that the Yandex Cloud Lockbox secret belongs to. It will be deduced from provider configuration if not set explicitly.
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// The KMS key used to encrypt the Yandex Cloud Lockbox secret.
+	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
 
-// The KMS key used to encrypt the Yandex Cloud Lockbox secret.
-KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
+	// A set of key/value label pairs to assign to the Yandex Cloud Lockbox secret.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the Yandex Cloud Lockbox secret.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Name for the Yandex Cloud Lockbox secret.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Name for the Yandex Cloud Lockbox secret.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Payload specification for password generation.
+	PasswordPayloadSpecification []PasswordPayloadSpecificationObservation `json:"passwordPayloadSpecification,omitempty" tf:"password_payload_specification,omitempty"`
 
-// Payload specification for password generation.
-PasswordPayloadSpecification []PasswordPayloadSpecificationObservation `json:"passwordPayloadSpecification,omitempty" tf:"password_payload_specification,omitempty"`
-
-// The Yandex Cloud Lockbox secret status.
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
+	// The Yandex Cloud Lockbox secret status.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
 }
 
-
 type SecretParameters struct {
+	// Whether the Yandex Cloud Lockbox secret is protected from deletion.
+	// +kubebuilder:validation:Optional
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
 
-// Whether the Yandex Cloud Lockbox secret is protected from deletion.
-// +kubebuilder:validation:Optional
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// A description for the Yandex Cloud Lockbox secret.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// A description for the Yandex Cloud Lockbox secret.
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// ID of the folder that the Yandex Cloud Lockbox secret belongs to. It will be deduced from provider configuration if not set explicitly.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// ID of the folder that the Yandex Cloud Lockbox secret belongs to. It will be deduced from provider configuration if not set explicitly.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
 
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
 
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// The KMS key used to encrypt the Yandex Cloud Lockbox secret.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey
+	// +kubebuilder:validation:Optional
+	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
 
-// The KMS key used to encrypt the Yandex Cloud Lockbox secret.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey
-// +kubebuilder:validation:Optional
-KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
+	// Reference to a SymmetricKey in kms to populate kmsKeyId.
+	// +kubebuilder:validation:Optional
+	KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`
 
-// Reference to a SymmetricKey in kms to populate kmsKeyId.
-// +kubebuilder:validation:Optional
-KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`
+	// Selector for a SymmetricKey in kms to populate kmsKeyId.
+	// +kubebuilder:validation:Optional
+	KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`
 
-// Selector for a SymmetricKey in kms to populate kmsKeyId.
-// +kubebuilder:validation:Optional
-KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`
+	// A set of key/value label pairs to assign to the Yandex Cloud Lockbox secret.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// A set of key/value label pairs to assign to the Yandex Cloud Lockbox secret.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Name for the Yandex Cloud Lockbox secret.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// Name for the Yandex Cloud Lockbox secret.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Payload specification for password generation.
-// +kubebuilder:validation:Optional
-PasswordPayloadSpecification []PasswordPayloadSpecificationParameters `json:"passwordPayloadSpecification,omitempty" tf:"password_payload_specification,omitempty"`
+	// Payload specification for password generation.
+	// +kubebuilder:validation:Optional
+	PasswordPayloadSpecification []PasswordPayloadSpecificationParameters `json:"passwordPayloadSpecification,omitempty" tf:"password_payload_specification,omitempty"`
 }
 
 // SecretSpec defines the desired state of Secret
 type SecretSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider SecretParameters `json:"forProvider"`
+	ForProvider SecretParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -255,20 +238,19 @@ type SecretSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider SecretInitParameters `json:"initProvider,omitempty"`
+	InitProvider SecretInitParameters `json:"initProvider,omitempty"`
 }
 
 // SecretStatus defines the observed state of Secret.
 type SecretStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider SecretObservation `json:"atProvider,omitempty"`
+	AtProvider SecretObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // Secret is the Schema for the Secrets API. Manages Yandex Cloud Lockbox secret.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
diff --git a/apis/lockbox/v1alpha1/zz_secretiambinding_terraformed.go b/apis/lockbox/v1alpha1/zz_secretiambinding_terraformed.go
index 0c18807..21b9337 100755
--- a/apis/lockbox/v1alpha1/zz_secretiambinding_terraformed.go
+++ b/apis/lockbox/v1alpha1/zz_secretiambinding_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this SecretIAMBinding
 func (mg *SecretIAMBinding) GetTerraformResourceType() string {
-	return "yandex_lockbox_secret_iam_binding"
+	return "yandex_lockbox_secret_iam_binding"
 }
 
 // GetConnectionDetailsMapping for this SecretIAMBinding
 func (tr *SecretIAMBinding) GetConnectionDetailsMapping() map[string]string {
-	return nil
+	return nil
 }
 
 // GetObservation of this SecretIAMBinding
 func (tr *SecretIAMBinding) GetObservation() (map[string]any, error) {
-	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this SecretIAMBinding
 func (tr *SecretIAMBinding) SetObservation(obs map[string]any) error {
-	p, err := json.TFParser.Marshal(obs)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this SecretIAMBinding
 func (tr *SecretIAMBinding) GetID() string {
-	if tr.Status.AtProvider.ID == nil {
-		return ""
-	}
-	return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this SecretIAMBinding
 func (tr *SecretIAMBinding) GetParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this SecretIAMBinding
 func (tr *SecretIAMBinding) SetParameters(params map[string]any) error {
-	p, err := json.TFParser.Marshal(params)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}
 
 // GetInitParameters of this SecretIAMBinding
 func (tr *SecretIAMBinding) GetInitParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetInitParameters of this SecretIAMBinding
 func (tr *SecretIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from the initProvider to forProvider. As it also sets
-	// overwrite to true, we need to set it back to false, we don't
-	// want to overwrite the forProvider fields with the initProvider
-	// fields.
-	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-		c.Overwrite = false
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-	}
-
-	return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
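Reviewer aside (not part of the diff): the Note(lsviben) comment in GetMergedParameters above is easy to misread, so here is the same mergo option combination exercised on made-up map values. forProvider wins on conflicts; initProvider only fills in what is missing.

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	params := map[string]any{"role": "viewer"}                          // stands in for spec.forProvider
	initParams := map[string]any{"role": "admin", "sleep_after": 30.0} // stands in for spec.initProvider

	// WithSliceDeepCopy turns Overwrite on as a side effect, so the
	// extra option switches it back off, exactly as the generated code does.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}
	fmt.Println(params) // map[role:viewer sleep_after:30]
}
```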
 
 // LateInitialize this SecretIAMBinding using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *SecretIAMBinding) LateInitialize(attrs []byte) (bool, error) {
-	params := &SecretIAMBindingParameters{}
-	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-	}
-	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-	li := resource.NewGenericLateInitializer(opts...)
-	return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &SecretIAMBindingParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *SecretIAMBinding) GetTerraformSchemaVersion() int {
-	return 0
+	return 0
 }
diff --git a/apis/lockbox/v1alpha1/zz_secretiambinding_types.go b/apis/lockbox/v1alpha1/zz_secretiambinding_types.go
index a151fac..c8d92b8 100755
--- a/apis/lockbox/v1alpha1/zz_secretiambinding_types.go
+++ b/apis/lockbox/v1alpha1/zz_secretiambinding_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,114 +7,104 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
 
 type SecretIAMBindingInitParameters struct {
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
+	// +crossplane:generate:reference:refFieldName=ServiceAccountRef
+	// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
 
-// Identities that will be granted the privilege in role. Each entry can have one of the following values:
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
-// +crossplane:generate:reference:refFieldName=ServiceAccountRef
-// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
-
-// The role that should be applied. See roles.
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The role that should be applied. See roles.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
 
-// The Yandex Lockbox Secret Secret ID to apply a binding to.
-// +crossplane:generate:reference:type=Secret
-SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+	// The Yandex Lockbox Secret Secret ID to apply a binding to.
+	// +crossplane:generate:reference:type=Secret
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
 
-// Reference to a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
+	// Reference to a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
 
-// Selector for a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
+	// Selector for a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
 
-// References to ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
+	// References to ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
 
-// Selector for a list of ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
+	// Selector for a list of ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
 
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
 }
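Reviewer aside (not part of the diff): the ref/selector pairs above follow the usual crossplane-runtime pattern: set the ID directly, point at a managed object by name, or select one by labels. A sketch with a trimmed mirror of the struct and hypothetical object names:

```go
package main

import (
	"fmt"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

// Trimmed mirror of the generated SecretIAMBindingInitParameters.
type SecretIAMBindingInitParameters struct {
	SecretID         *string       `json:"secretId,omitempty"`
	SecretIDRef      *v1.Reference `json:"secretIdRef,omitempty"`
	SecretIDSelector *v1.Selector  `json:"secretIdSelector,omitempty"`
}

func main() {
	// By name: the resolver fills secretId from the referenced object.
	byRef := SecretIAMBindingInitParameters{
		SecretIDRef: &v1.Reference{Name: "example-lockbox-secret"},
	}
	fmt.Printf("%+v\n", byRef)

	// By labels: any matching Secret managed resource can satisfy it.
	bySelector := SecretIAMBindingInitParameters{
		SecretIDSelector: &v1.Selector{MatchLabels: map[string]string{"app": "demo"}},
	}
	fmt.Printf("%+v\n", bySelector)
}
```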
 
-
 type SecretIAMBindingObservation struct {
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
 
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
-
-// Identities that will be granted the privilege in role. Each entry can have one of the following values:
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
-
-// The role that should be applied. See roles.
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The role that should be applied. See roles.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
 
-// The Yandex Lockbox Secret Secret ID to apply a binding to.
-SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+	// The Yandex Lockbox Secret Secret ID to apply a binding to.
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
 
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
 }
 
-
 type SecretIAMBindingParameters struct {
-
-// Identities that will be granted the privilege in role. Each entry can have one of the following values:
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
-// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
-// +crossplane:generate:reference:refFieldName=ServiceAccountRef
-// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
-// +kubebuilder:validation:Optional
-// +listType=set
-Members []*string `json:"members,omitempty" tf:"members,omitempty"`
-
-// The role that should be applied. See roles.
-// +kubebuilder:validation:Optional
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
-
-// The Yandex Lockbox Secret Secret ID to apply a binding to.
-// +crossplane:generate:reference:type=Secret
-// +kubebuilder:validation:Optional
-SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
-
-// Reference to a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
-
-// Selector for a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
-
-// References to ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
-
-// Selector for a list of ServiceAccount in iam to populate members.
-// +kubebuilder:validation:Optional
-ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
-
-// +kubebuilder:validation:Optional
-SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
+	// +crossplane:generate:reference:refFieldName=ServiceAccountRef
+	// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be applied. See roles.
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// The Yandex Lockbox Secret Secret ID to apply a binding to.
+	// +crossplane:generate:reference:type=Secret
+	// +kubebuilder:validation:Optional
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+
+	// Reference to a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
+
+	// Selector for a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
+
+	// References to ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
+
+	// Selector for a list of ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
+
+	// +kubebuilder:validation:Optional
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
 }
 
 // SecretIAMBindingSpec defines the desired state of SecretIAMBinding
 type SecretIAMBindingSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider SecretIAMBindingParameters `json:"forProvider"`
+	ForProvider SecretIAMBindingParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -127,20 +115,19 @@ type SecretIAMBindingSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider SecretIAMBindingInitParameters `json:"initProvider,omitempty"`
+	InitProvider SecretIAMBindingInitParameters `json:"initProvider,omitempty"`
 }
 
 // SecretIAMBindingStatus defines the observed state of SecretIAMBinding.
 type SecretIAMBindingStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider SecretIAMBindingObservation `json:"atProvider,omitempty"`
+	AtProvider SecretIAMBindingObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // SecretIAMBinding is the Schema for the SecretIAMBindings API. Allows management of a single IAM binding for a
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
@@ -150,9 +137,9 @@ type SecretIAMBindingStatus struct {
 type SecretIAMBinding struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
-	Spec SecretIAMBindingSpec `json:"spec"`
-	Status SecretIAMBindingStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
+	Spec SecretIAMBindingSpec `json:"spec"`
+	Status SecretIAMBindingStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/apis/lockbox/v1alpha1/zz_secretversion_terraformed.go b/apis/lockbox/v1alpha1/zz_secretversion_terraformed.go
index 94ea697..28ca3d1 100755
--- a/apis/lockbox/v1alpha1/zz_secretversion_terraformed.go
+++ b/apis/lockbox/v1alpha1/zz_secretversion_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this SecretVersion
 func (mg *SecretVersion) GetTerraformResourceType() string {
-	return "yandex_lockbox_secret_version"
+	return "yandex_lockbox_secret_version"
 }
 
 // GetConnectionDetailsMapping for this SecretVersion
 func (tr *SecretVersion) GetConnectionDetailsMapping() map[string]string {
-	return map[string]string{
-		"entries[*].text_value": "entries[*].textValueSecretRef",
-	}
+	return map[string]string{"entries[*].text_value": "entries[*].textValueSecretRef"}
 }
 
 // GetObservation of this SecretVersion
 func (tr *SecretVersion) GetObservation() (map[string]any, error) {
-	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this SecretVersion
 func (tr *SecretVersion) SetObservation(obs map[string]any) error {
-	p, err := json.TFParser.Marshal(obs)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this SecretVersion
 func (tr *SecretVersion) GetID() string {
-	if tr.Status.AtProvider.ID == nil {
-		return ""
-	}
-	return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this SecretVersion
 func (tr *SecretVersion) GetParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this SecretVersion
 func (tr *SecretVersion) SetParameters(params map[string]any) error {
-	p, err := json.TFParser.Marshal(params)
-	if err != nil {
-		return err
-	}
-	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this SecretVersion
 func (tr *SecretVersion) GetInitParameters() (map[string]any, error) {
-	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
-	if err != nil {
-		return nil, err
-	}
-	base := map[string]any{}
-	return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetInitParameters of this SecretVersion
 func (tr *SecretVersion) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from the initProvider to forProvider. As it also sets
-	// overwrite to true, we need to set it back to false, we don't
-	// want to overwrite the forProvider fields with the initProvider
-	// fields.
-	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-		c.Overwrite = false
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-	}
-
-	return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this SecretVersion using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *SecretVersion) LateInitialize(attrs []byte) (bool, error) {
-	params := &SecretVersionParameters{}
-	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-	}
-	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-	li := resource.NewGenericLateInitializer(opts...)
-	return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &SecretVersionParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
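Reviewer aside (not part of the diff): LateInitialize above delegates to upjet's GenericLateInitializer. For intuition only, here is a hand-rolled miniature of the idea on a trimmed mirror of SecretVersionParameters; this is not upjet's implementation, and the values are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirror of the generated SecretVersionParameters.
type SecretVersionParameters struct {
	Description *string `json:"description,omitempty"`
	SecretID    *string `json:"secretId,omitempty"`
}

func main() {
	userSet := "placeholder-secret-id"
	spec := SecretVersionParameters{SecretID: &userSet}

	// Observed Terraform state attributes, as LateInitialize receives them.
	attrs := []byte(`{"description":"imported version","secretId":"from-state"}`)
	var observed SecretVersionParameters
	if err := json.Unmarshal(attrs, &observed); err != nil {
		panic(err)
	}

	changed := false
	if spec.Description == nil && observed.Description != nil {
		spec.Description = observed.Description // nil field filled from state
		changed = true
	}
	// spec.SecretID stays as the user set it; set fields are never overwritten.
	fmt.Println(changed, *spec.Description, *spec.SecretID)
}
```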
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *SecretVersion) GetTerraformSchemaVersion() int {
-	return 1
+	return 1
 }
diff --git a/apis/lockbox/v1alpha1/zz_secretversion_types.go b/apis/lockbox/v1alpha1/zz_secretversion_types.go
index c251127..d7cde73 100755
--- a/apis/lockbox/v1alpha1/zz_secretversion_types.go
+++ b/apis/lockbox/v1alpha1/zz_secretversion_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,170 +7,149 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
 
 type CommandInitParameters struct {
+	// List of arguments to be passed to the script/command.
+	Args []*string `json:"args,omitempty" tf:"args,omitempty"`
 
-// List of arguments to be passed to the script/command.
-Args []*string `json:"args,omitempty" tf:"args,omitempty"`
+	// Map of environment variables to set before calling the script/command.
+	// +mapType=granular
+	Env map[string]*string `json:"env,omitempty" tf:"env,omitempty"`
 
-// Map of environment variables to set before calling the script/command.
-// +mapType=granular
-Env map[string]*string `json:"env,omitempty" tf:"env,omitempty"`
-
-// The path to the script or command to execute.
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// The path to the script or command to execute.
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
 }
 
-
 type CommandObservation struct {
+	// List of arguments to be passed to the script/command.
+	Args []*string `json:"args,omitempty" tf:"args,omitempty"`
 
-// List of arguments to be passed to the script/command.
-Args []*string `json:"args,omitempty" tf:"args,omitempty"`
-
-// Map of environment variables to set before calling the script/command.
-// +mapType=granular
-Env map[string]*string `json:"env,omitempty" tf:"env,omitempty"`
+	// Map of environment variables to set before calling the script/command.
+	// +mapType=granular
+	Env map[string]*string `json:"env,omitempty" tf:"env,omitempty"`
 
-// The path to the script or command to execute.
-Path *string `json:"path,omitempty" tf:"path,omitempty"`
+	// The path to the script or command to execute.
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
 }
 
-
 type CommandParameters struct {
+	// List of arguments to be passed to the script/command.
+	// +kubebuilder:validation:Optional
+	Args []*string `json:"args,omitempty" tf:"args,omitempty"`
 
-// List of arguments to be passed to the script/command.
-// +kubebuilder:validation:Optional
-Args []*string `json:"args,omitempty" tf:"args,omitempty"`
-
-// Map of environment variables to set before calling the script/command.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Env map[string]*string `json:"env,omitempty" tf:"env,omitempty"`
+	// Map of environment variables to set before calling the script/command.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Env map[string]*string `json:"env,omitempty" tf:"env,omitempty"`
 
-// The path to the script or command to execute.
-// +kubebuilder:validation:Optional
-Path *string `json:"path" tf:"path,omitempty"`
+	// The path to the script or command to execute.
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path" tf:"path,omitempty"`
 }
 
-
 type EntriesInitParameters struct {
+	// The command that generates the text value of the entry.
+	Command []CommandInitParameters `json:"command,omitempty" tf:"command,omitempty"`
 
-// The command that generates the text value of the entry.
-Command []CommandInitParameters `json:"command,omitempty" tf:"command,omitempty"`
+	// The key of the entry.
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
 
-// The key of the entry.
-Key *string `json:"key,omitempty" tf:"key,omitempty"`
-
-// The text value of the entry.
-TextValueSecretRef *v1.SecretKeySelector `json:"textValueSecretRef,omitempty" tf:"-"`
+	// The text value of the entry.
+	TextValueSecretRef *v1.SecretKeySelector `json:"textValueSecretRef,omitempty" tf:"-"`
 }
 
-
 type EntriesObservation struct {
+	// The command that generates the text value of the entry.
+	Command []CommandObservation `json:"command,omitempty" tf:"command,omitempty"`
 
-// The command that generates the text value of the entry.
-Command []CommandObservation `json:"command,omitempty" tf:"command,omitempty"`
-
-// The key of the entry.
-Key *string `json:"key,omitempty" tf:"key,omitempty"`
+	// The key of the entry.
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
 }
 
-
 type EntriesParameters struct {
+	// The command that generates the text value of the entry.
+	// +kubebuilder:validation:Optional
+	Command []CommandParameters `json:"command,omitempty" tf:"command,omitempty"`
 
-// The command that generates the text value of the entry.
-// +kubebuilder:validation:Optional
-Command []CommandParameters `json:"command,omitempty" tf:"command,omitempty"`
-
-// The key of the entry.
-// +kubebuilder:validation:Optional
-Key *string `json:"key" tf:"key,omitempty"`
+	// The key of the entry.
+	// +kubebuilder:validation:Optional
+	Key *string `json:"key" tf:"key,omitempty"`
 
-// The text value of the entry.
-// +kubebuilder:validation:Optional
-TextValueSecretRef *v1.SecretKeySelector `json:"textValueSecretRef,omitempty" tf:"-"`
+	// The text value of the entry.
+	// +kubebuilder:validation:Optional
+	TextValueSecretRef *v1.SecretKeySelector `json:"textValueSecretRef,omitempty" tf:"-"`
 }
 
-
 type SecretVersionInitParameters struct {
+	// The Yandex Cloud Lockbox secret version description.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// The Yandex Cloud Lockbox secret version description.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// List of entries in the Yandex Cloud Lockbox secret version. Must be omitted for secrets with a payload specification.
+	Entries []EntriesInitParameters `json:"entries,omitempty" tf:"entries,omitempty"`
 
-// List of entries in the Yandex Cloud Lockbox secret version. Must be omitted for secrets with a payload specification.
-Entries []EntriesInitParameters `json:"entries,omitempty" tf:"entries,omitempty"`
+	// The Yandex Cloud Lockbox secret ID where to add the version.
+	// +crossplane:generate:reference:type=Secret
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
 
-// The Yandex Cloud Lockbox secret ID where to add the version.
-// +crossplane:generate:reference:type=Secret
-SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+	// Reference to a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
 
-// Reference to a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
-
-// Selector for a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
+	// Selector for a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
 }
 
-
 type SecretVersionObservation struct {
+	// The Yandex Cloud Lockbox secret version description.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// The Yandex Cloud Lockbox secret version description.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// List of entries in the Yandex Cloud Lockbox secret version. Must be omitted for secrets with a payload specification.
-Entries []EntriesObservation `json:"entries,omitempty" tf:"entries,omitempty"`
+	// List of entries in the Yandex Cloud Lockbox secret version. Must be omitted for secrets with a payload specification.
+	Entries []EntriesObservation `json:"entries,omitempty" tf:"entries,omitempty"`
 
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-// The Yandex Cloud Lockbox secret ID where to add the version.
-SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+	// The Yandex Cloud Lockbox secret ID where to add the version.
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
 }
 
-
 type SecretVersionParameters struct {
+	// The Yandex Cloud Lockbox secret version description.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// The Yandex Cloud Lockbox secret version description.
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// List of entries in the Yandex Cloud Lockbox secret version. Must be omitted for secrets with a payload specification.
-// +kubebuilder:validation:Optional
-Entries []EntriesParameters `json:"entries,omitempty" tf:"entries,omitempty"`
+	// List of entries in the Yandex Cloud Lockbox secret version. Must be omitted for secrets with a payload specification.
+	// +kubebuilder:validation:Optional
+	Entries []EntriesParameters `json:"entries,omitempty" tf:"entries,omitempty"`
 
-// The Yandex Cloud Lockbox secret ID where to add the version.
-// +crossplane:generate:reference:type=Secret
-// +kubebuilder:validation:Optional
-SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
+	// The Yandex Cloud Lockbox secret ID where to add the version.
+	// +crossplane:generate:reference:type=Secret
+	// +kubebuilder:validation:Optional
+	SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"`
 
-// Reference to a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
+	// Reference to a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"`
 
-// Selector for a Secret to populate secretId.
-// +kubebuilder:validation:Optional
-SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
+	// Selector for a Secret to populate secretId.
+	// +kubebuilder:validation:Optional
+	SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"`
 }
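Reviewer aside (not part of the diff): the Entries and Command types above compose into a version whose values are produced by a script at apply time. A sketch using trimmed mirrors of the generated types; the script path and arguments are made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func strPtr(s string) *string { return &s }

// Trimmed mirrors of the generated CommandParameters and EntriesParameters.
type CommandParameters struct {
	Path *string   `json:"path,omitempty"`
	Args []*string `json:"args,omitempty"`
}

type EntriesParameters struct {
	Key     *string             `json:"key"`
	Command []CommandParameters `json:"command,omitempty"`
}

func main() {
	// One entry whose text value comes from running a script.
	entry := EntriesParameters{
		Key: strPtr("api-token"),
		Command: []CommandParameters{{
			Path: strPtr("/opt/scripts/fetch-token.sh"),
			Args: []*string{strPtr("--env"), strPtr("staging")},
		}},
	}
	out, _ := json.MarshalIndent(entry, "", "  ")
	fmt.Println(string(out))
}
```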
 
 // SecretVersionSpec defines the desired state of SecretVersion
 type SecretVersionSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider SecretVersionParameters `json:"forProvider"`
+	ForProvider SecretVersionParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -183,20 +160,19 @@ type SecretVersionSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider SecretVersionInitParameters `json:"initProvider,omitempty"`
+	InitProvider SecretVersionInitParameters `json:"initProvider,omitempty"`
 }
 
 // SecretVersionStatus defines the observed state of SecretVersion.
 type SecretVersionStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider SecretVersionObservation `json:"atProvider,omitempty"`
+	AtProvider SecretVersionObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // SecretVersion is the Schema for the SecretVersions API. Manages Yandex Cloud Lockbox secret version.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
diff --git a/apis/logging/v1alpha1/zz_generated.conversion_hubs.go b/apis/logging/v1alpha1/zz_generated.conversion_hubs.go
new file mode 100755
index 0000000..755a865
--- /dev/null
+++ b/apis/logging/v1alpha1/zz_generated.conversion_hubs.go
@@ -0,0 +1,6 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+// Hub marks this type as a conversion hub.
+func (tr *Group) Hub() {}
diff --git a/apis/logging/v1alpha1/zz_generated.deepcopy.go b/apis/logging/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..b6e87a5
--- /dev/null
+++ b/apis/logging/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,312 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Group) DeepCopyInto(out *Group) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group.
+func (in *Group) DeepCopy() *Group {
+	if in == nil {
+		return nil
+	}
+	out := new(Group)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Group) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupInitParameters) DeepCopyInto(out *GroupInitParameters) {
+	*out = *in
+	if in.DataStream != nil {
+		in, out := &in.DataStream, &out.DataStream
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.RetentionPeriod != nil {
+		in, out := &in.RetentionPeriod, &out.RetentionPeriod
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupInitParameters.
+func (in *GroupInitParameters) DeepCopy() *GroupInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupList) DeepCopyInto(out *GroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Group, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList.
+func (in *GroupList) DeepCopy() *GroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupObservation) DeepCopyInto(out *GroupObservation) {
+	*out = *in
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DataStream != nil {
+		in, out := &in.DataStream, &out.DataStream
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.RetentionPeriod != nil {
+		in, out := &in.RetentionPeriod, &out.RetentionPeriod
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupObservation.
+func (in *GroupObservation) DeepCopy() *GroupObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupParameters) DeepCopyInto(out *GroupParameters) {
+	*out = *in
+	if in.DataStream != nil {
+		in, out := &in.DataStream, &out.DataStream
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.RetentionPeriod != nil {
+		in, out := &in.RetentionPeriod, &out.RetentionPeriod
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupParameters.
+func (in *GroupParameters) DeepCopy() *GroupParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupParameters)
+	in.DeepCopyInto(out)
+	return out
+}
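Reviewer aside (not part of the diff): the generated label-map deepcopy above allocates a fresh map and fresh *string values rather than copying pointers. A tiny standalone demonstration of why that matters; the loop mirrors the generated one.

```go
package main

import "fmt"

func strPtr(s string) *string { return &s }

func main() {
	orig := map[string]*string{"env": strPtr("prod")}

	// Equivalent of the generated copy loop: new map, new pointees.
	copied := make(map[string]*string, len(orig))
	for key, val := range orig {
		if val == nil {
			copied[key] = nil
			continue
		}
		outVal := *val // copy the string, not the pointer
		copied[key] = &outVal
	}

	*copied["env"] = "dev"
	fmt.Println(*orig["env"], *copied["env"]) // prod dev: the original is untouched
}
```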
+func (in *GroupSpec) DeepCopyInto(out *GroupSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSpec.
+func (in *GroupSpec) DeepCopy() *GroupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupStatus) DeepCopyInto(out *GroupStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus.
+func (in *GroupStatus) DeepCopy() *GroupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/apis/logging/v1alpha1/zz_generated.resolvers.go b/apis/logging/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..ce15c86
--- /dev/null
+++ b/apis/logging/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,53 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	errors "github.com/pkg/errors"
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this Group.
+func (mg *Group) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	return nil
+}
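// Editor's note: a brief, hedged sketch (not part of the generated code) of the
// resolution performed above. If spec.forProvider.folderIdRef names a Folder
// object, Resolve copies that Folder's crossplane.io/external-name annotation
// (via the reference.ExternalName() extractor) into spec.forProvider.folderId;
// a folderIdSelector instead picks a Folder by labels. The names, IDs, ctx and
// kubeClient below are hypothetical; v1 is crossplane-runtime's common/v1.
//
//	mg := &Group{}
//	mg.Spec.ForProvider.FolderIDRef = &v1.Reference{Name: "my-folder"}
//	_ = mg.ResolveReferences(ctx, kubeClient)
//	// now *mg.Spec.ForProvider.FolderID == "b1gexample-folder-id"
//	// (reference.FromPtrValue maps a nil *string to "", and
//	// reference.ToPtrValue maps "" back to nil)
diff --git a/apis/logging/v1alpha1/zz_group_terraformed.go b/apis/logging/v1alpha1/zz_group_terraformed.go
new file mode 100755
index 0000000..895e1c1
--- /dev/null
+++ b/apis/logging/v1alpha1/zz_group_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.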
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Group
+func (mg *Group) GetTerraformResourceType() string {
+	return "yandex_logging_group"
+}
+
+// GetConnectionDetailsMapping for this Group
+func (tr *Group) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Group
+func (tr *Group) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Group
+func (tr *Group) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Group
+func (tr *Group) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Group
+func (tr *Group) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Group
+func (tr *Group) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Group
+func (tr *Group) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Group
+func (tr *Group) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
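// Editor's note: a minimal sketch (not generated code) of the merge semantics in
// GetMergedParameters above. mergo.WithSliceDeepCopy also enables overwriting,
// so the extra config callback resets Overwrite to false: keys already present
// in forProvider win, and initProvider only fills gaps. The values shown are
// hypothetical.
//
//	params := map[string]any{"name": "from-forProvider"}
//	initParams := map[string]any{"name": "from-initProvider", "retention_period": "72h"}
//	_ = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
//		c.Overwrite = false
//	})
//	// params["name"] == "from-forProvider"; params["retention_period"] == "72h"
+
+// LateInitialize this Group using its observed tfState.
+// returns True if there are any spec changes for the resource.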
+func (tr *Group) LateInitialize(attrs []byte) (bool, error) {
+	params := &GroupParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Group) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/logging/v1alpha1/zz_group_types.go b/apis/logging/v1alpha1/zz_group_types.go
new file mode 100755
index 0000000..06920ab
--- /dev/null
+++ b/apis/logging/v1alpha1/zz_group_types.go
@@ -0,0 +1,169 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type GroupInitParameters struct {
+	DataStream *string `json:"dataStream,omitempty" tf:"data_stream,omitempty"`
+
+	// A description for the Yandex Cloud Logging group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the Yandex Cloud Logging group belongs to. It will be deduced from provider configuration if not set explicitly.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A set of key/value label pairs to assign to the Yandex Cloud Logging group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name for the Yandex Cloud Logging group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Log entries retention period for the Yandex Cloud Logging group.
+	RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`
+}
+
+type GroupObservation struct {
+
+	// ID of the cloud that the Yandex Cloud Logging group belongs to.
+	CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"`
+
+	// The Yandex Cloud Logging group creation timestamp.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	DataStream *string `json:"dataStream,omitempty" tf:"data_stream,omitempty"`
+
+	// A description for the Yandex Cloud Logging group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the Yandex Cloud Logging group belongs to. It will be deduced from provider configuration if not set explicitly.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// The Yandex Cloud Logging group ID.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// A set of key/value label pairs to assign to the Yandex Cloud Logging group.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name for the Yandex Cloud Logging group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Log entries retention period for the Yandex Cloud Logging group.
+	RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`
+
+	// The Yandex Cloud Logging group status.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+}
+
+type GroupParameters struct {
+
+	// +kubebuilder:validation:Optional
+	DataStream *string `json:"dataStream,omitempty" tf:"data_stream,omitempty"`
+
+	// A description for the Yandex Cloud Logging group.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the Yandex Cloud Logging group belongs to. It will be deduced from provider configuration if not set explicitly.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A set of key/value label pairs to assign to the Yandex Cloud Logging group.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name for the Yandex Cloud Logging group.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Log entries retention period for the Yandex Cloud Logging group.
+	// +kubebuilder:validation:Optional
+	RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`
+}
+
+// GroupSpec defines the desired state of Group
+type GroupSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     GroupParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example, because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider GroupInitParameters `json:"initProvider,omitempty"`
+}
+
+// GroupStatus defines the observed state of Group.
+type GroupStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        GroupObservation `json:"atProvider,omitempty"`
+}
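// Editor's note: a minimal, hedged sketch (not generated code) of how the types
// above compose into a managed resource in Go. The field values and the ptr
// helper are hypothetical; v1 is crossplane-runtime's common/v1 package.
//
//	func ptr[T any](v T) *T { return &v }
//
//	g := &Group{
//		Spec: GroupSpec{
//			ForProvider: GroupParameters{
//				Name:            ptr("app-logs"),
//				RetentionPeriod: ptr("72h"),
//				// resolved into FolderID by ResolveReferences:
//				FolderIDRef: &v1.Reference{Name: "my-folder"},
//			},
//		},
//	}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Group is the Schema for the Groups API. Manages Yandex Cloud Logging group.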
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type Group struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec              GroupSpec   `json:"spec"`
+	Status            GroupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// GroupList contains a list of Groups
+type GroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Group `json:"items"`
+}
+
+// Group type metadata.
+var (
+	Group_Kind             = "Group"
+	Group_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Group_Kind}.String()
+	Group_KindAPIVersion   = Group_Kind + "." + CRDGroupVersion.String()
+	Group_GroupVersionKind = CRDGroupVersion.WithKind(Group_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Group{}, &GroupList{})
+}
diff --git a/apis/logging/v1alpha1/zz_groupversion_info.go b/apis/logging/v1alpha1/zz_groupversion_info.go
new file mode 100755
index 0000000..627e90c
--- /dev/null
+++ b/apis/logging/v1alpha1/zz_groupversion_info.go
@@ -0,0 +1,28 @@
+// Code generated by upjet. DO NOT EDIT.
+
+// +kubebuilder:object:generate=true
+// +groupName=logging.yandex-cloud.upjet.crossplane.io
+// +versionName=v1alpha1
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata.
+const (
+	CRDGroup   = "logging.yandex-cloud.upjet.crossplane.io"
+	CRDVersion = "v1alpha1"
+)
+
+var (
+	// CRDGroupVersion is the API Group Version used to register the objects
+	CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
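// Editor's note: a minimal usage sketch (assumed wiring, following the standard
// controller-runtime pattern; not part of this diff): consumers register the
// types above on a runtime.Scheme through the generated AddToScheme before
// building typed clients.
//
//	import (
//		"k8s.io/apimachinery/pkg/runtime"
//		loggingv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/logging/v1alpha1"
//	)
//
//	scheme := runtime.NewScheme()
//	if err := loggingv1alpha1.AddToScheme(scheme); err != nil {
//		panic(err) // the scheme now knows Group and GroupList
//	}
diff --git a/apis/mdb/v1alpha1/zz_clickhousecluster_terraformed.go b/apis/mdb/v1alpha1/zz_clickhousecluster_terraformed.go
new file mode 100755
index 0000000..5b0043a
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_clickhousecluster_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.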
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this ClickhouseCluster
+func (mg *ClickhouseCluster) GetTerraformResourceType() string {
+	return "yandex_mdb_clickhouse_cluster"
+}
+
+// GetConnectionDetailsMapping for this ClickhouseCluster
+func (tr *ClickhouseCluster) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"admin_password": "adminPasswordSecretRef", "clickhouse[*].config[*].kafka[*].sasl_password": "clickhouse[*].config[*].kafka[*].saslPasswordSecretRef", "clickhouse[*].config[*].kafka_topic[*].settings[*].sasl_password": "clickhouse[*].config[*].kafkaTopic[*].settings[*].saslPasswordSecretRef", "clickhouse[*].config[*].rabbitmq[*].password": "clickhouse[*].config[*].rabbitmq[*].passwordSecretRef", "user[*].password": "user[*].passwordSecretRef"}
+}
+
+// GetObservation of this ClickhouseCluster
+func (tr *ClickhouseCluster) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this ClickhouseCluster
+func (tr *ClickhouseCluster) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this ClickhouseCluster
+func (tr *ClickhouseCluster) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this ClickhouseCluster
+func (tr *ClickhouseCluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ClickhouseCluster
+func (tr *ClickhouseCluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ClickhouseCluster
+func (tr *ClickhouseCluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ClickhouseCluster
+func (tr *ClickhouseCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ClickhouseCluster using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ClickhouseCluster) LateInitialize(attrs []byte) (bool, error) {
+	params := &ClickhouseClusterParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *ClickhouseCluster) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/mdb/v1alpha1/zz_clickhousecluster_types.go b/apis/mdb/v1alpha1/zz_clickhousecluster_types.go
index f7f2493..280ba39 100755
--- a/apis/mdb/v1alpha1/zz_clickhousecluster_types.go
+++ b/apis/mdb/v1alpha1/zz_clickhousecluster_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,4084 +7,3887 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
-
 type AccessInitParameters struct {
+	// Allow access for DataLens. Can be either true or false.
+	DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"`
 
-// Allow access for DataLens. Can be either true or false.
-DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer. Can be either true or false. -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer. Can be either true or false. + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allow access for Yandex.Metrika. Can be either true or false. -Metrika *bool `json:"metrika,omitempty" tf:"metrika,omitempty"` + // Allow access for Yandex.Metrika. Can be either true or false. + Metrika *bool `json:"metrika,omitempty" tf:"metrika,omitempty"` -// Allow access for Serverless. Can be either true or false. -Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` + // Allow access for Serverless. Can be either true or false. + Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` -// Allow access for Web SQL. Can be either true or false. -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allow access for Web SQL. Can be either true or false. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` -// Allow access for YandexQuery. Can be either true or false. -YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` + // Allow access for YandexQuery. Can be either true or false. + YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` } - type AccessParameters struct { + // Allow access for DataLens. Can be either true or false. + // +kubebuilder:validation:Optional + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for DataLens. Can be either true or false. -// +kubebuilder:validation:Optional -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + // Allow access for DataTransfer. Can be either true or false. + // +kubebuilder:validation:Optional + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allow access for DataTransfer. Can be either true or false. -// +kubebuilder:validation:Optional -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for Yandex.Metrika. Can be either true or false. + // +kubebuilder:validation:Optional + Metrika *bool `json:"metrika,omitempty" tf:"metrika,omitempty"` -// Allow access for Yandex.Metrika. Can be either true or false. -// +kubebuilder:validation:Optional -Metrika *bool `json:"metrika,omitempty" tf:"metrika,omitempty"` + // Allow access for Serverless. Can be either true or false. + // +kubebuilder:validation:Optional + Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` -// Allow access for Serverless. Can be either true or false. -// +kubebuilder:validation:Optional -Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` + // Allow access for Web SQL. Can be either true or false. + // +kubebuilder:validation:Optional + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` -// Allow access for Web SQL. Can be either true or false. -// +kubebuilder:validation:Optional -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` - -// Allow access for YandexQuery. Can be either true or false. -// +kubebuilder:validation:Optional -YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` + // Allow access for YandexQuery. Can be either true or false. 
+ // +kubebuilder:validation:Optional + YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` } - type BackupWindowStartInitParameters struct { + // The hour at which backup will be started. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type BackupWindowStartObservation struct { + // The hour at which backup will be started. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type BackupWindowStartParameters struct { + // The hour at which backup will be started. + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -// +kubebuilder:validation:Optional -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. -// +kubebuilder:validation:Optional -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type ClickhouseClusterInitParameters struct { + // Access policy to the ClickHouse cluster. The structure is documented below. + Access []AccessInitParameters `json:"access,omitempty" tf:"access,omitempty"` -// Access policy to the ClickHouse cluster. The structure is documented below. -Access []AccessInitParameters `json:"access,omitempty" tf:"access,omitempty"` - -// A password used to authorize as user admin when sql_user_management enabled. -AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` + // A password used to authorize as user admin when sql_user_management enabled. + AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` -// The period in days during which backups are stored. -BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` + // The period in days during which backups are stored. + BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` -// Time to start the daily backup, in the UTC timezone. The structure is documented below. -BackupWindowStart []BackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + BackupWindowStart []BackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` -// Configuration of the ClickHouse subcluster. The structure is documented below. -Clickhouse []ClickhouseInitParameters `json:"clickhouse,omitempty" tf:"clickhouse,omitempty"` + // Configuration of the ClickHouse subcluster. The structure is documented below. 
+ Clickhouse []ClickhouseInitParameters `json:"clickhouse,omitempty" tf:"clickhouse,omitempty"` -// Minimum data age in seconds. -CloudStorage []CloudStorageInitParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + // Minimum data age in seconds. + CloudStorage []CloudStorageInitParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// Whether to copy schema on new ClickHouse hosts. -CopySchemaOnNewHosts *bool `json:"copySchemaOnNewHosts,omitempty" tf:"copy_schema_on_new_hosts,omitempty"` + // Whether to copy schema on new ClickHouse hosts. + CopySchemaOnNewHosts *bool `json:"copySchemaOnNewHosts,omitempty" tf:"copy_schema_on_new_hosts,omitempty"` -// A database of the ClickHouse cluster. The structure is documented below. -Database []DatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"` + // A database of the ClickHouse cluster. The structure is documented below. + Database []DatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the ClickHouse cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the ClickHouse cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts with ClickHouse. If not, it's used ZooKeeper with placement on separate hosts. -EmbeddedKeeper *bool `json:"embeddedKeeper,omitempty" tf:"embedded_keeper,omitempty"` + // Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts with ClickHouse. If not, ZooKeeper is used and placed on separate hosts. + EmbeddedKeeper *bool `json:"embeddedKeeper,omitempty" tf:"embedded_keeper,omitempty"` -// Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION. -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of protobuf or capnproto format schemas. The structure is documented below. -FormatSchema []FormatSchemaInitParameters `json:"formatSchema,omitempty" tf:"format_schema,omitempty"` + // A set of protobuf or capnproto format schemas. The structure is documented below. + FormatSchema []FormatSchemaInitParameters `json:"formatSchema,omitempty" tf:"format_schema,omitempty"` -// A host of the ClickHouse cluster. The structure is documented below. -Host []HostInitParameters `json:"host,omitempty" tf:"host,omitempty"` + // A host of the ClickHouse cluster. The structure is documented below. + Host []HostInitParameters `json:"host,omitempty" tf:"host,omitempty"` -// A set of key/value label pairs to assign to the ClickHouse cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the ClickHouse cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A group of machine learning models. The structure is documented below -MLModel []MLModelInitParameters `json:"mlModel,omitempty" tf:"ml_model,omitempty"` + // A group of machine learning models. The structure is documented below + MLModel []MLModelInitParameters `json:"mlModel,omitempty" tf:"ml_model,omitempty"` -MaintenanceWindow []MaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + MaintenanceWindow []MaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// Name of the ClickHouse cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the ClickHouse cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network, to which the ClickHouse cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network, to which the ClickHouse cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` -// Grants admin user database management permission. 
-SQLDatabaseManagement *bool `json:"sqlDatabaseManagement,omitempty" tf:"sql_database_management,omitempty"` + // Grants admin user database management permission. + SQLDatabaseManagement *bool `json:"sqlDatabaseManagement,omitempty" tf:"sql_database_management,omitempty"` -// Enables admin user with user management permission. -SQLUserManagement *bool `json:"sqlUserManagement,omitempty" tf:"sql_user_management,omitempty"` + // Enables admin user with user management permission. + SQLUserManagement *bool `json:"sqlUserManagement,omitempty" tf:"sql_user_management,omitempty"` -// A set of ids of security groups assigned to hosts of the cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` -// ID of the service account used for access to Yandex Object Storage. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // ID of the service account used for access to Yandex Object Storage. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` -Shard []ShardInitParameters `json:"shard,omitempty" tf:"shard,omitempty"` + Shard []ShardInitParameters `json:"shard,omitempty" tf:"shard,omitempty"` -// A group of clickhouse shards. The structure is documented below. 
-ShardGroup []ShardGroupInitParameters `json:"shardGroup,omitempty" tf:"shard_group,omitempty"` + // A group of clickhouse shards. The structure is documented below. + ShardGroup []ShardGroupInitParameters `json:"shardGroup,omitempty" tf:"shard_group,omitempty"` -// A user of the ClickHouse cluster. The structure is documented below. -User []UserInitParameters `json:"user,omitempty" tf:"user,omitempty"` + // A user of the ClickHouse cluster. The structure is documented below. + User []UserInitParameters `json:"user,omitempty" tf:"user,omitempty"` -// Version of the ClickHouse server software. -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version of the ClickHouse server software. + Version *string `json:"version,omitempty" tf:"version,omitempty"` -// Configuration of the ZooKeeper subcluster. The structure is documented below. -Zookeeper []ZookeeperInitParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"` + // Configuration of the ZooKeeper subcluster. The structure is documented below. + Zookeeper []ZookeeperInitParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"` } - type ClickhouseClusterObservation struct { + // Access policy to the ClickHouse cluster. The structure is documented below. + Access []AccessObservation `json:"access,omitempty" tf:"access,omitempty"` -// Access policy to the ClickHouse cluster. The structure is documented below. -Access []AccessObservation `json:"access,omitempty" tf:"access,omitempty"` + // The period in days during which backups are stored. + BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` -// The period in days during which backups are stored. -BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + BackupWindowStart []BackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` -// Time to start the daily backup, in the UTC timezone. The structure is documented below. -BackupWindowStart []BackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + // Configuration of the ClickHouse subcluster. The structure is documented below. + Clickhouse []ClickhouseObservation `json:"clickhouse,omitempty" tf:"clickhouse,omitempty"` -// Configuration of the ClickHouse subcluster. The structure is documented below. -Clickhouse []ClickhouseObservation `json:"clickhouse,omitempty" tf:"clickhouse,omitempty"` + // Minimum data age in seconds. + CloudStorage []CloudStorageObservation `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` -// Minimum data age in seconds. -CloudStorage []CloudStorageObservation `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + // Whether to copy schema on new ClickHouse hosts. + CopySchemaOnNewHosts *bool `json:"copySchemaOnNewHosts,omitempty" tf:"copy_schema_on_new_hosts,omitempty"` -// Whether to copy schema on new ClickHouse hosts. -CopySchemaOnNewHosts *bool `json:"copySchemaOnNewHosts,omitempty" tf:"copy_schema_on_new_hosts,omitempty"` + // Timestamp of cluster creation. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Timestamp of cluster creation. 
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // A database of the ClickHouse cluster. The structure is documented below. + Database []DatabaseObservation `json:"database,omitempty" tf:"database,omitempty"` -// A database of the ClickHouse cluster. The structure is documented below. -Database []DatabaseObservation `json:"database,omitempty" tf:"database,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Description of the ClickHouse cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Description of the ClickHouse cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts with ClickHouse. If not, ZooKeeper is used and placed on separate hosts. + EmbeddedKeeper *bool `json:"embeddedKeeper,omitempty" tf:"embedded_keeper,omitempty"` -// Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts with ClickHouse. If not, it's used ZooKeeper with placement on separate hosts. -EmbeddedKeeper *bool `json:"embeddedKeeper,omitempty" tf:"embedded_keeper,omitempty"` + // Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION. -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // A set of protobuf or capnproto format schemas. The structure is documented below. + FormatSchema []FormatSchemaObservation `json:"formatSchema,omitempty" tf:"format_schema,omitempty"` -// A set of protobuf or capnproto format schemas. The structure is documented below. -FormatSchema []FormatSchemaObservation `json:"formatSchema,omitempty" tf:"format_schema,omitempty"` + // Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. + Health *string `json:"health,omitempty" tf:"health,omitempty"` -// Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. -Health *string `json:"health,omitempty" tf:"health,omitempty"` + // A host of the ClickHouse cluster. The structure is documented below. + Host []HostObservation `json:"host,omitempty" tf:"host,omitempty"` -// A host of the ClickHouse cluster. The structure is documented below. -Host []HostObservation `json:"host,omitempty" tf:"host,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // A set of key/value label pairs to assign to the ClickHouse cluster.
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the ClickHouse cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A group of machine learning models. The structure is documented below + MLModel []MLModelObservation `json:"mlModel,omitempty" tf:"ml_model,omitempty"` -// A group of machine learning models. The structure is documented below -MLModel []MLModelObservation `json:"mlModel,omitempty" tf:"ml_model,omitempty"` + MaintenanceWindow []MaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -MaintenanceWindow []MaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + // Name of the ClickHouse cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the ClickHouse cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // ID of the network, to which the ClickHouse cluster belongs. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// ID of the network, to which the ClickHouse cluster belongs. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Grants admin user database management permission. + SQLDatabaseManagement *bool `json:"sqlDatabaseManagement,omitempty" tf:"sql_database_management,omitempty"` -// Grants admin user database management permission. -SQLDatabaseManagement *bool `json:"sqlDatabaseManagement,omitempty" tf:"sql_database_management,omitempty"` + // Enables admin user with user management permission. + SQLUserManagement *bool `json:"sqlUserManagement,omitempty" tf:"sql_user_management,omitempty"` -// Enables admin user with user management permission. -SQLUserManagement *bool `json:"sqlUserManagement,omitempty" tf:"sql_user_management,omitempty"` + // A set of ids of security groups assigned to hosts of the cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// A set of ids of security groups assigned to hosts of the cluster. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // ID of the service account used for access to Yandex Object Storage. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// ID of the service account used for access to Yandex Object Storage. -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + Shard []ShardObservation `json:"shard,omitempty" tf:"shard,omitempty"` -Shard []ShardObservation `json:"shard,omitempty" tf:"shard,omitempty"` + // A group of clickhouse shards. The structure is documented below. + ShardGroup []ShardGroupObservation `json:"shardGroup,omitempty" tf:"shard_group,omitempty"` -// A group of clickhouse shards. The structure is documented below. -ShardGroup []ShardGroupObservation `json:"shardGroup,omitempty" tf:"shard_group,omitempty"` + // Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. + Status *string `json:"status,omitempty" tf:"status,omitempty"` -// Status of the cluster. 
Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // A user of the ClickHouse cluster. The structure is documented below. + User []UserObservation `json:"user,omitempty" tf:"user,omitempty"` -// A user of the ClickHouse cluster. The structure is documented below. -User []UserObservation `json:"user,omitempty" tf:"user,omitempty"` + // Version of the ClickHouse server software. + Version *string `json:"version,omitempty" tf:"version,omitempty"` -// Version of the ClickHouse server software. -Version *string `json:"version,omitempty" tf:"version,omitempty"` - -// Configuration of the ZooKeeper subcluster. The structure is documented below. -Zookeeper []ZookeeperObservation `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"` + // Configuration of the ZooKeeper subcluster. The structure is documented below. + Zookeeper []ZookeeperObservation `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"` } - type ClickhouseClusterParameters struct { + // Access policy to the ClickHouse cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Access []AccessParameters `json:"access,omitempty" tf:"access,omitempty"` -// Access policy to the ClickHouse cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Access []AccessParameters `json:"access,omitempty" tf:"access,omitempty"` - -// A password used to authorize as user admin when sql_user_management enabled. -// +kubebuilder:validation:Optional -AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` + // A password used to authorize as user admin when sql_user_management enabled. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` -// The period in days during which backups are stored. -// +kubebuilder:validation:Optional -BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` + // The period in days during which backups are stored. + // +kubebuilder:validation:Optional + BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` -// Time to start the daily backup, in the UTC timezone. The structure is documented below. -// +kubebuilder:validation:Optional -BackupWindowStart []BackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + // +kubebuilder:validation:Optional + BackupWindowStart []BackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` -// Configuration of the ClickHouse subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -Clickhouse []ClickhouseParameters `json:"clickhouse,omitempty" tf:"clickhouse,omitempty"` + // Configuration of the ClickHouse subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + Clickhouse []ClickhouseParameters `json:"clickhouse,omitempty" tf:"clickhouse,omitempty"` -// Minimum data age in seconds. -// +kubebuilder:validation:Optional -CloudStorage []CloudStorageParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + // Minimum data age in seconds. 
+ // +kubebuilder:validation:Optional + CloudStorage []CloudStorageParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` -// +kubebuilder:validation:Optional -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Whether to copy schema on new ClickHouse hosts. + // +kubebuilder:validation:Optional + CopySchemaOnNewHosts *bool `json:"copySchemaOnNewHosts,omitempty" tf:"copy_schema_on_new_hosts,omitempty"` + + // A database of the ClickHouse cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Database []DatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Whether to copy schema on new ClickHouse hosts. -// +kubebuilder:validation:Optional -CopySchemaOnNewHosts *bool `json:"copySchemaOnNewHosts,omitempty" tf:"copy_schema_on_new_hosts,omitempty"` + // Description of the ClickHouse cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A database of the ClickHouse cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Database []DatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` + // Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts with ClickHouse. If not, ZooKeeper is used and placed on separate hosts. + // +kubebuilder:validation:Optional + EmbeddedKeeper *bool `json:"embeddedKeeper,omitempty" tf:"embedded_keeper,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION. + // +kubebuilder:validation:Optional + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// Description of the ClickHouse cluster. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts with ClickHouse. If not, it's used ZooKeeper with placement on separate hosts. -// +kubebuilder:validation:Optional -EmbeddedKeeper *bool `json:"embeddedKeeper,omitempty" tf:"embedded_keeper,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION. -// +kubebuilder:validation:Optional -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // A set of protobuf or capnproto format schemas. The structure is documented below. + // +kubebuilder:validation:Optional + FormatSchema []FormatSchemaParameters `json:"formatSchema,omitempty" tf:"format_schema,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // A host of the ClickHouse cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Host []HostParameters `json:"host,omitempty" tf:"host,omitempty"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // A set of key/value label pairs to assign to the ClickHouse cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of protobuf or capnproto format schemas. The structure is documented below. -// +kubebuilder:validation:Optional -FormatSchema []FormatSchemaParameters `json:"formatSchema,omitempty" tf:"format_schema,omitempty"` + // A group of machine learning models. The structure is documented below + // +kubebuilder:validation:Optional + MLModel []MLModelParameters `json:"mlModel,omitempty" tf:"ml_model,omitempty"` -// A host of the ClickHouse cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Host []HostParameters `json:"host,omitempty" tf:"host,omitempty"` + // +kubebuilder:validation:Optional + MaintenanceWindow []MaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// A set of key/value label pairs to assign to the ClickHouse cluster. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Name of the ClickHouse cluster. Provided by the client when the cluster is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// A group of machine learning models. The structure is documented below -// +kubebuilder:validation:Optional -MLModel []MLModelParameters `json:"mlModel,omitempty" tf:"ml_model,omitempty"` + // ID of the network, to which the ClickHouse cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// +kubebuilder:validation:Optional -MaintenanceWindow []MaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` -// Name of the ClickHouse cluster. Provided by the client when the cluster is created. 
-// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` -// ID of the network, to which the ClickHouse cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Grants admin user database management permission. + // +kubebuilder:validation:Optional + SQLDatabaseManagement *bool `json:"sqlDatabaseManagement,omitempty" tf:"sql_database_management,omitempty"` -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + // Enables admin user with user management permission. + // +kubebuilder:validation:Optional + SQLUserManagement *bool `json:"sqlUserManagement,omitempty" tf:"sql_user_management,omitempty"` -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` - -// Grants admin user database management permission. -// +kubebuilder:validation:Optional -SQLDatabaseManagement *bool `json:"sqlDatabaseManagement,omitempty" tf:"sql_database_management,omitempty"` - -// Enables admin user with user management permission. -// +kubebuilder:validation:Optional -SQLUserManagement *bool `json:"sqlUserManagement,omitempty" tf:"sql_user_management,omitempty"` - -// A set of ids of security groups assigned to hosts of the cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` - -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` - -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` - -// ID of the service account used for access to Yandex Object Storage. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` - -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` - -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` - -// +kubebuilder:validation:Optional -Shard []ShardParameters `json:"shard,omitempty" tf:"shard,omitempty"` - -// A group of clickhouse shards. The structure is documented below. -// +kubebuilder:validation:Optional -ShardGroup []ShardGroupParameters `json:"shardGroup,omitempty" tf:"shard_group,omitempty"` - -// A user of the ClickHouse cluster. The structure is documented below. 
-// +kubebuilder:validation:Optional -User []UserParameters `json:"user,omitempty" tf:"user,omitempty"` - -// Version of the ClickHouse server software. -// +kubebuilder:validation:Optional -Version *string `json:"version,omitempty" tf:"version,omitempty"` - -// Configuration of the ZooKeeper subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -Zookeeper []ZookeeperParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"` -} + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` -type ClickhouseInitParameters struct { + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + // ID of the service account used for access to Yandex Object Storage. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Main ClickHouse cluster configuration. -Config []ConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below. -Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` -} + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // +kubebuilder:validation:Optional + Shard []ShardParameters `json:"shard,omitempty" tf:"shard,omitempty"` -type ClickhouseObservation struct { + // A group of clickhouse shards. The structure is documented below. + // +kubebuilder:validation:Optional + ShardGroup []ShardGroupParameters `json:"shardGroup,omitempty" tf:"shard_group,omitempty"` + // A user of the ClickHouse cluster. The structure is documented below. + // +kubebuilder:validation:Optional + User []UserParameters `json:"user,omitempty" tf:"user,omitempty"` -// Main ClickHouse cluster configuration. -Config []ConfigObservation `json:"config,omitempty" tf:"config,omitempty"` + // Version of the ClickHouse server software. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` -// Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below. -Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Configuration of the ZooKeeper subcluster. The structure is documented below. 
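Editor's note (added example): ClickhouseClusterParameters above is the Go shape behind spec.forProvider of the ClickhouseCluster resource. A minimal, hedged sketch of composing it directly — the import path is an assumption based on this provider's repository layout, and the ptr/bptr helpers are hypothetical, not part of the generated API:

// Sketch only: builds a minimal desired-state value for a ClickHouse cluster.
package main

import (
	"fmt"

	mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" // assumed path
)

func ptr(s string) *string { return &s } // hypothetical helper
func bptr(b bool) *bool    { return &b } // hypothetical helper

func main() {
	params := mdb.ClickhouseClusterParameters{
		Name:               ptr("example-cluster"),
		Environment:        ptr("PRODUCTION"), // PRESTABLE or PRODUCTION, per the field comment
		DeletionProtection: bptr(true),
		Clickhouse:         []mdb.ClickhouseParameters{{}}, // subcluster block; see types below
	}
	fmt.Println(*params.Name)
}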
+type ClickhouseInitParameters struct {
-type ClickhouseParameters struct {
+	// Main ClickHouse cluster configuration.
+	Config []ConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"`
+	// Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
+	Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+}
-// Main ClickHouse cluster configuration.
-// +kubebuilder:validation:Optional
-Config []ConfigParameters `json:"config,omitempty" tf:"config,omitempty"`
+type ClickhouseObservation struct {
+
+	// Main ClickHouse cluster configuration.
+	Config []ConfigObservation `json:"config,omitempty" tf:"config,omitempty"`
-// Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Resources []ResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
+	Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
 }
+type ClickhouseParameters struct {
-type CloudStorageInitParameters struct {
+	// Main ClickHouse cluster configuration.
+	// +kubebuilder:validation:Optional
+	Config []ConfigParameters `json:"config,omitempty" tf:"config,omitempty"`
+
+	// Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Resources []ResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+}
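Editor's note (added example): as elsewhere in this generated file, every block type comes as an upjet triple — InitParameters, Parameters and Observation. By the usual Crossplane convention (stated here as an assumption, not from this diff), init values are serialized under spec.initProvider and honored at creation, Parameters is the continuously reconciled spec.forProvider, and Observation is read back into status.atProvider. A hedged sketch, same assumed package path as above:

// Sketch: same shape, three roles.
package main

import (
	mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" // assumed path
)

func main() {
	_ = mdb.ClickhouseInitParameters{} // -> spec.initProvider.clickhouse
	_ = mdb.ClickhouseParameters{}     // -> spec.forProvider.clickhouse
	_ = mdb.ClickhouseObservation{}    // <- status.atProvider.clickhouse
}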
+type CloudStorageInitParameters struct {
-// Enables temporary storage in the cluster repository of data requested from the object repository.
-DataCacheEnabled *bool `json:"dataCacheEnabled,omitempty" tf:"data_cache_enabled,omitempty"`
+	// Enables temporary storage in the cluster repository of data requested from the object repository.
+	DataCacheEnabled *bool `json:"dataCacheEnabled,omitempty" tf:"data_cache_enabled,omitempty"`
-// Defines the maximum amount of memory (in bytes) allocated in the cluster storage for temporary storage of data requested from the object storage.
-DataCacheMaxSize *float64 `json:"dataCacheMaxSize,omitempty" tf:"data_cache_max_size,omitempty"`
+	// Defines the maximum amount of memory (in bytes) allocated in the cluster storage for temporary storage of data requested from the object storage.
+	DataCacheMaxSize *float64 `json:"dataCacheMaxSize,omitempty" tf:"data_cache_max_size,omitempty"`
-// Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// Sets the minimum free space ratio in the cluster storage. If the free space is lower than this value, the data is transferred to Yandex Object Storage. Acceptable values are 0 to 1, inclusive.
-MoveFactor *float64 `json:"moveFactor,omitempty" tf:"move_factor,omitempty"`
+	// Sets the minimum free space ratio in the cluster storage. If the free space is lower than this value, the data is transferred to Yandex Object Storage. Acceptable values are 0 to 1, inclusive.
+	MoveFactor *float64 `json:"moveFactor,omitempty" tf:"move_factor,omitempty"`
-// Disables merging of data parts in Yandex Object Storage.
-PreferNotToMerge *bool `json:"preferNotToMerge,omitempty" tf:"prefer_not_to_merge,omitempty"`
+	// Disables merging of data parts in Yandex Object Storage.
+	PreferNotToMerge *bool `json:"preferNotToMerge,omitempty" tf:"prefer_not_to_merge,omitempty"`
 }
-
 type CloudStorageObservation struct {
+	// Enables temporary storage in the cluster repository of data requested from the object repository.
+	DataCacheEnabled *bool `json:"dataCacheEnabled,omitempty" tf:"data_cache_enabled,omitempty"`
-// Enables temporary storage in the cluster repository of data requested from the object repository.
-DataCacheEnabled *bool `json:"dataCacheEnabled,omitempty" tf:"data_cache_enabled,omitempty"`
-
-// Defines the maximum amount of memory (in bytes) allocated in the cluster storage for temporary storage of data requested from the object storage.
-DataCacheMaxSize *float64 `json:"dataCacheMaxSize,omitempty" tf:"data_cache_max_size,omitempty"`
+	// Defines the maximum amount of memory (in bytes) allocated in the cluster storage for temporary storage of data requested from the object storage.
+	DataCacheMaxSize *float64 `json:"dataCacheMaxSize,omitempty" tf:"data_cache_max_size,omitempty"`
-// Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// Sets the minimum free space ratio in the cluster storage. If the free space is lower than this value, the data is transferred to Yandex Object Storage. Acceptable values are 0 to 1, inclusive.
-MoveFactor *float64 `json:"moveFactor,omitempty" tf:"move_factor,omitempty"`
+	// Sets the minimum free space ratio in the cluster storage. If the free space is lower than this value, the data is transferred to Yandex Object Storage. Acceptable values are 0 to 1, inclusive.
+	MoveFactor *float64 `json:"moveFactor,omitempty" tf:"move_factor,omitempty"`
-// Disables merging of data parts in Yandex Object Storage.
-PreferNotToMerge *bool `json:"preferNotToMerge,omitempty" tf:"prefer_not_to_merge,omitempty"`
+	// Disables merging of data parts in Yandex Object Storage.
+	PreferNotToMerge *bool `json:"preferNotToMerge,omitempty" tf:"prefer_not_to_merge,omitempty"`
 }
-
 type CloudStorageParameters struct {
+	// Enables temporary storage in the cluster repository of data requested from the object repository.
+	// +kubebuilder:validation:Optional
+	DataCacheEnabled *bool `json:"dataCacheEnabled,omitempty" tf:"data_cache_enabled,omitempty"`
-// Enables temporary storage in the cluster repository of data requested from the object repository.
-// +kubebuilder:validation:Optional
-DataCacheEnabled *bool `json:"dataCacheEnabled,omitempty" tf:"data_cache_enabled,omitempty"`
-
-// Defines the maximum amount of memory (in bytes) allocated in the cluster storage for temporary storage of data requested from the object storage.
-// +kubebuilder:validation:Optional
-DataCacheMaxSize *float64 `json:"dataCacheMaxSize,omitempty" tf:"data_cache_max_size,omitempty"`
+	// Defines the maximum amount of memory (in bytes) allocated in the cluster storage for temporary storage of data requested from the object storage.
+	// +kubebuilder:validation:Optional
+	DataCacheMaxSize *float64 `json:"dataCacheMaxSize,omitempty" tf:"data_cache_max_size,omitempty"`
-// Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
-// +kubebuilder:validation:Optional
-Enabled *bool `json:"enabled" tf:"enabled,omitempty"`
+	// Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled" tf:"enabled,omitempty"`
-// Sets the minimum free space ratio in the cluster storage. If the free space is lower than this value, the data is transferred to Yandex Object Storage. Acceptable values are 0 to 1, inclusive.
-// +kubebuilder:validation:Optional
-MoveFactor *float64 `json:"moveFactor,omitempty" tf:"move_factor,omitempty"`
+	// Sets the minimum free space ratio in the cluster storage. If the free space is lower than this value, the data is transferred to Yandex Object Storage. Acceptable values are 0 to 1, inclusive.
+	// +kubebuilder:validation:Optional
+	MoveFactor *float64 `json:"moveFactor,omitempty" tf:"move_factor,omitempty"`
-// Disables merging of data parts in Yandex Object Storage.
-// +kubebuilder:validation:Optional
-PreferNotToMerge *bool `json:"preferNotToMerge,omitempty" tf:"prefer_not_to_merge,omitempty"`
+	// Disables merging of data parts in Yandex Object Storage.
+	// +kubebuilder:validation:Optional
+	PreferNotToMerge *bool `json:"preferNotToMerge,omitempty" tf:"prefer_not_to_merge,omitempty"`
 }
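Editor's note (added example): in CloudStorageParameters only the enabled field's json tag lacks omitempty, so it is the one required field once a cloud_storage block is present, and move_factor must stay within 0 and 1 per its comment. A hedged sketch, helpers hypothetical and package path assumed as above:

// Sketch: hybrid storage with a bounded move factor and a read cache.
package main

import (
	"fmt"

	mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" // assumed path
)

func b(v bool) *bool       { return &v }
func f(v float64) *float64 { return &v }

func main() {
	cs := mdb.CloudStorageParameters{
		Enabled:          b(true),     // required: json tag is "enabled" with no omitempty
		MoveFactor:       f(0.1),      // 0..1: offload parts when free space drops below 10%
		DataCacheEnabled: b(true),
		DataCacheMaxSize: f(10 << 30), // bytes (10 GiB), per the field comment
	}
	fmt.Printf("%+v\n", cs)
}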
-
 type CompressionInitParameters struct {
+	// Compression level for ZSTD method.
+	Level *float64 `json:"level,omitempty" tf:"level,omitempty"`
-// Compression level for ZSTD method.
-Level *float64 `json:"level,omitempty" tf:"level,omitempty"`
-
-// Method: Compression method. Two methods are available: LZ4 and zstd.
-Method *string `json:"method,omitempty" tf:"method,omitempty"`
+	// Method: Compression method. Two methods are available: LZ4 and zstd.
+	Method *string `json:"method,omitempty" tf:"method,omitempty"`
-// Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
-MinPartSize *float64 `json:"minPartSize,omitempty" tf:"min_part_size,omitempty"`
+	// Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
+	MinPartSize *float64 `json:"minPartSize,omitempty" tf:"min_part_size,omitempty"`
-// Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
-MinPartSizeRatio *float64 `json:"minPartSizeRatio,omitempty" tf:"min_part_size_ratio,omitempty"`
+	// Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
+	MinPartSizeRatio *float64 `json:"minPartSizeRatio,omitempty" tf:"min_part_size_ratio,omitempty"`
 }
-
 type CompressionObservation struct {
+	// Compression level for ZSTD method.
+	Level *float64 `json:"level,omitempty" tf:"level,omitempty"`
-// Compression level for ZSTD method.
-Level *float64 `json:"level,omitempty" tf:"level,omitempty"`
-
-// Method: Compression method. Two methods are available: LZ4 and zstd.
-Method *string `json:"method,omitempty" tf:"method,omitempty"`
+	// Method: Compression method. Two methods are available: LZ4 and zstd.
+	Method *string `json:"method,omitempty" tf:"method,omitempty"`
-// Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
-MinPartSize *float64 `json:"minPartSize,omitempty" tf:"min_part_size,omitempty"`
+	// Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
+	MinPartSize *float64 `json:"minPartSize,omitempty" tf:"min_part_size,omitempty"`
-// Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
-MinPartSizeRatio *float64 `json:"minPartSizeRatio,omitempty" tf:"min_part_size_ratio,omitempty"`
+	// Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
+	MinPartSizeRatio *float64 `json:"minPartSizeRatio,omitempty" tf:"min_part_size_ratio,omitempty"`
 }
-
 type CompressionParameters struct {
+	// Compression level for ZSTD method.
+	// +kubebuilder:validation:Optional
+	Level *float64 `json:"level,omitempty" tf:"level,omitempty"`
-// Compression level for ZSTD method.
-// +kubebuilder:validation:Optional
-Level *float64 `json:"level,omitempty" tf:"level,omitempty"`
+	// Method: Compression method. Two methods are available: LZ4 and zstd.
+	// +kubebuilder:validation:Optional
+	Method *string `json:"method" tf:"method,omitempty"`
-// Method: Compression method. Two methods are available: LZ4 and zstd.
-// +kubebuilder:validation:Optional
-Method *string `json:"method" tf:"method,omitempty"`
+	// Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
+	// +kubebuilder:validation:Optional
+	MinPartSize *float64 `json:"minPartSize" tf:"min_part_size,omitempty"`
-// Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
-// +kubebuilder:validation:Optional
-MinPartSize *float64 `json:"minPartSize" tf:"min_part_size,omitempty"`
-
-// Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
-// +kubebuilder:validation:Optional
-MinPartSizeRatio *float64 `json:"minPartSizeRatio" tf:"min_part_size_ratio,omitempty"`
+	// Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
+	// +kubebuilder:validation:Optional
+	MinPartSizeRatio *float64 `json:"minPartSizeRatio" tf:"min_part_size_ratio,omitempty"`
 }
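Editor's note (added example): the same required-versus-optional split shows up in CompressionParameters — the method, minPartSize and minPartSizeRatio json tags have no omitempty, so every compression rule must set all three, while level only matters for zstd. A hedged sketch; the exact accepted casing of the method value is the API's, not verified here:

// Sketch: one compression rule; values are illustrative only.
package main

import (
	"fmt"

	mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" // assumed path
)

func f(v float64) *float64 { return &v }
func s(v string) *string   { return &v }

func main() {
	rule := mdb.CompressionParameters{
		Method:           s("ZSTD"),  // LZ4 or zstd per the field comment (casing assumed)
		Level:            f(3),       // only meaningful for zstd
		MinPartSize:      f(1024),    // bytes; rule applies to parts >= this size
		MinPartSizeRatio: f(0.5),     // rule applies when part/table size ratio >= this value
	}
	fmt.Printf("%+v\n", rule)
}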
tf:"background_message_broker_schedule_pool_size,omitempty"` -BackgroundMessageBrokerSchedulePoolSize *float64 `json:"backgroundMessageBrokerSchedulePoolSize,omitempty" tf:"background_message_broker_schedule_pool_size,omitempty"` + BackgroundMovePoolSize *float64 `json:"backgroundMovePoolSize,omitempty" tf:"background_move_pool_size,omitempty"` -BackgroundMovePoolSize *float64 `json:"backgroundMovePoolSize,omitempty" tf:"background_move_pool_size,omitempty"` + BackgroundPoolSize *float64 `json:"backgroundPoolSize,omitempty" tf:"background_pool_size,omitempty"` -BackgroundPoolSize *float64 `json:"backgroundPoolSize,omitempty" tf:"background_pool_size,omitempty"` + BackgroundSchedulePoolSize *float64 `json:"backgroundSchedulePoolSize,omitempty" tf:"background_schedule_pool_size,omitempty"` -BackgroundSchedulePoolSize *float64 `json:"backgroundSchedulePoolSize,omitempty" tf:"background_schedule_pool_size,omitempty"` + // Data compression configuration. The structure is documented below. + Compression []CompressionInitParameters `json:"compression,omitempty" tf:"compression,omitempty"` -// Data compression configuration. The structure is documented below. -Compression []CompressionInitParameters `json:"compression,omitempty" tf:"compression,omitempty"` + // A database of the ClickHouse cluster. The structure is documented below. + DefaultDatabase *string `json:"defaultDatabase,omitempty" tf:"default_database,omitempty"` -// A database of the ClickHouse cluster. The structure is documented below. -DefaultDatabase *string `json:"defaultDatabase,omitempty" tf:"default_database,omitempty"` + DictionariesLazyLoad *bool `json:"dictionariesLazyLoad,omitempty" tf:"dictionaries_lazy_load,omitempty"` -DictionariesLazyLoad *bool `json:"dictionariesLazyLoad,omitempty" tf:"dictionaries_lazy_load,omitempty"` + GeobaseEnabled *bool `json:"geobaseEnabled,omitempty" tf:"geobase_enabled,omitempty"` -GeobaseEnabled *bool `json:"geobaseEnabled,omitempty" tf:"geobase_enabled,omitempty"` + GeobaseURI *string `json:"geobaseUri,omitempty" tf:"geobase_uri,omitempty"` -GeobaseURI *string `json:"geobaseUri,omitempty" tf:"geobase_uri,omitempty"` + // Graphite rollup configuration. The structure is documented below. + GraphiteRollup []GraphiteRollupInitParameters `json:"graphiteRollup,omitempty" tf:"graphite_rollup,omitempty"` -// Graphite rollup configuration. The structure is documented below. -GraphiteRollup []GraphiteRollupInitParameters `json:"graphiteRollup,omitempty" tf:"graphite_rollup,omitempty"` + // Kafka connection configuration. The structure is documented below. + Kafka []KafkaInitParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` -// Kafka connection configuration. The structure is documented below. -Kafka []KafkaInitParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` + // Kafka topic connection configuration. The structure is documented below. + KafkaTopic []KafkaTopicInitParameters `json:"kafkaTopic,omitempty" tf:"kafka_topic,omitempty"` -// Kafka topic connection configuration. The structure is documented below. -KafkaTopic []KafkaTopicInitParameters `json:"kafkaTopic,omitempty" tf:"kafka_topic,omitempty"` + KeepAliveTimeout *float64 `json:"keepAliveTimeout,omitempty" tf:"keep_alive_timeout,omitempty"` -KeepAliveTimeout *float64 `json:"keepAliveTimeout,omitempty" tf:"keep_alive_timeout,omitempty"` + // ClickHouse server parameters. For more information, see the official documentation. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` -// ClickHouse server parameters. 
For more information, see the official documentation. -LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + MarkCacheSize *float64 `json:"markCacheSize,omitempty" tf:"mark_cache_size,omitempty"` -MarkCacheSize *float64 `json:"markCacheSize,omitempty" tf:"mark_cache_size,omitempty"` + MaxConcurrentQueries *float64 `json:"maxConcurrentQueries,omitempty" tf:"max_concurrent_queries,omitempty"` -MaxConcurrentQueries *float64 `json:"maxConcurrentQueries,omitempty" tf:"max_concurrent_queries,omitempty"` + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` -MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` + MaxPartitionSizeToDrop *float64 `json:"maxPartitionSizeToDrop,omitempty" tf:"max_partition_size_to_drop,omitempty"` -MaxPartitionSizeToDrop *float64 `json:"maxPartitionSizeToDrop,omitempty" tf:"max_partition_size_to_drop,omitempty"` + MaxTableSizeToDrop *float64 `json:"maxTableSizeToDrop,omitempty" tf:"max_table_size_to_drop,omitempty"` -MaxTableSizeToDrop *float64 `json:"maxTableSizeToDrop,omitempty" tf:"max_table_size_to_drop,omitempty"` + // MergeTree engine configuration. The structure is documented below. + MergeTree []MergeTreeInitParameters `json:"mergeTree,omitempty" tf:"merge_tree,omitempty"` -// MergeTree engine configuration. The structure is documented below. -MergeTree []MergeTreeInitParameters `json:"mergeTree,omitempty" tf:"merge_tree,omitempty"` + MetricLogEnabled *bool `json:"metricLogEnabled,omitempty" tf:"metric_log_enabled,omitempty"` -MetricLogEnabled *bool `json:"metricLogEnabled,omitempty" tf:"metric_log_enabled,omitempty"` + MetricLogRetentionSize *float64 `json:"metricLogRetentionSize,omitempty" tf:"metric_log_retention_size,omitempty"` -MetricLogRetentionSize *float64 `json:"metricLogRetentionSize,omitempty" tf:"metric_log_retention_size,omitempty"` + MetricLogRetentionTime *float64 `json:"metricLogRetentionTime,omitempty" tf:"metric_log_retention_time,omitempty"` -MetricLogRetentionTime *float64 `json:"metricLogRetentionTime,omitempty" tf:"metric_log_retention_time,omitempty"` + OpentelemetrySpanLogEnabled *bool `json:"opentelemetrySpanLogEnabled,omitempty" tf:"opentelemetry_span_log_enabled,omitempty"` -OpentelemetrySpanLogEnabled *bool `json:"opentelemetrySpanLogEnabled,omitempty" tf:"opentelemetry_span_log_enabled,omitempty"` + OpentelemetrySpanLogRetentionSize *float64 `json:"opentelemetrySpanLogRetentionSize,omitempty" tf:"opentelemetry_span_log_retention_size,omitempty"` -OpentelemetrySpanLogRetentionSize *float64 `json:"opentelemetrySpanLogRetentionSize,omitempty" tf:"opentelemetry_span_log_retention_size,omitempty"` + OpentelemetrySpanLogRetentionTime *float64 `json:"opentelemetrySpanLogRetentionTime,omitempty" tf:"opentelemetry_span_log_retention_time,omitempty"` -OpentelemetrySpanLogRetentionTime *float64 `json:"opentelemetrySpanLogRetentionTime,omitempty" tf:"opentelemetry_span_log_retention_time,omitempty"` + PartLogRetentionSize *float64 `json:"partLogRetentionSize,omitempty" tf:"part_log_retention_size,omitempty"` -PartLogRetentionSize *float64 `json:"partLogRetentionSize,omitempty" tf:"part_log_retention_size,omitempty"` + PartLogRetentionTime *float64 `json:"partLogRetentionTime,omitempty" tf:"part_log_retention_time,omitempty"` -PartLogRetentionTime *float64 `json:"partLogRetentionTime,omitempty" tf:"part_log_retention_time,omitempty"` + // Query cache configuration. The structure is documented below. 
+ QueryCache []QueryCacheInitParameters `json:"queryCache,omitempty" tf:"query_cache,omitempty"` -// Query cache configuration. The structure is documented below. -QueryCache []QueryCacheInitParameters `json:"queryCache,omitempty" tf:"query_cache,omitempty"` + QueryLogRetentionSize *float64 `json:"queryLogRetentionSize,omitempty" tf:"query_log_retention_size,omitempty"` -QueryLogRetentionSize *float64 `json:"queryLogRetentionSize,omitempty" tf:"query_log_retention_size,omitempty"` + QueryLogRetentionTime *float64 `json:"queryLogRetentionTime,omitempty" tf:"query_log_retention_time,omitempty"` -QueryLogRetentionTime *float64 `json:"queryLogRetentionTime,omitempty" tf:"query_log_retention_time,omitempty"` + // Query masking rules configuration. The structure is documented below. + QueryMaskingRules []QueryMaskingRulesInitParameters `json:"queryMaskingRules,omitempty" tf:"query_masking_rules,omitempty"` -// Query masking rules configuration. The structure is documented below. -QueryMaskingRules []QueryMaskingRulesInitParameters `json:"queryMaskingRules,omitempty" tf:"query_masking_rules,omitempty"` + QueryThreadLogEnabled *bool `json:"queryThreadLogEnabled,omitempty" tf:"query_thread_log_enabled,omitempty"` -QueryThreadLogEnabled *bool `json:"queryThreadLogEnabled,omitempty" tf:"query_thread_log_enabled,omitempty"` + QueryThreadLogRetentionSize *float64 `json:"queryThreadLogRetentionSize,omitempty" tf:"query_thread_log_retention_size,omitempty"` -QueryThreadLogRetentionSize *float64 `json:"queryThreadLogRetentionSize,omitempty" tf:"query_thread_log_retention_size,omitempty"` + QueryThreadLogRetentionTime *float64 `json:"queryThreadLogRetentionTime,omitempty" tf:"query_thread_log_retention_time,omitempty"` -QueryThreadLogRetentionTime *float64 `json:"queryThreadLogRetentionTime,omitempty" tf:"query_thread_log_retention_time,omitempty"` + QueryViewsLogEnabled *bool `json:"queryViewsLogEnabled,omitempty" tf:"query_views_log_enabled,omitempty"` -QueryViewsLogEnabled *bool `json:"queryViewsLogEnabled,omitempty" tf:"query_views_log_enabled,omitempty"` + QueryViewsLogRetentionSize *float64 `json:"queryViewsLogRetentionSize,omitempty" tf:"query_views_log_retention_size,omitempty"` -QueryViewsLogRetentionSize *float64 `json:"queryViewsLogRetentionSize,omitempty" tf:"query_views_log_retention_size,omitempty"` + QueryViewsLogRetentionTime *float64 `json:"queryViewsLogRetentionTime,omitempty" tf:"query_views_log_retention_time,omitempty"` -QueryViewsLogRetentionTime *float64 `json:"queryViewsLogRetentionTime,omitempty" tf:"query_views_log_retention_time,omitempty"` + // RabbitMQ connection configuration. The structure is documented below. + Rabbitmq []RabbitmqInitParameters `json:"rabbitmq,omitempty" tf:"rabbitmq,omitempty"` -// RabbitMQ connection configuration. The structure is documented below. 
-Rabbitmq []RabbitmqInitParameters `json:"rabbitmq,omitempty" tf:"rabbitmq,omitempty"` + SessionLogEnabled *bool `json:"sessionLogEnabled,omitempty" tf:"session_log_enabled,omitempty"` -SessionLogEnabled *bool `json:"sessionLogEnabled,omitempty" tf:"session_log_enabled,omitempty"` + SessionLogRetentionSize *float64 `json:"sessionLogRetentionSize,omitempty" tf:"session_log_retention_size,omitempty"` -SessionLogRetentionSize *float64 `json:"sessionLogRetentionSize,omitempty" tf:"session_log_retention_size,omitempty"` + SessionLogRetentionTime *float64 `json:"sessionLogRetentionTime,omitempty" tf:"session_log_retention_time,omitempty"` -SessionLogRetentionTime *float64 `json:"sessionLogRetentionTime,omitempty" tf:"session_log_retention_time,omitempty"` + TextLogEnabled *bool `json:"textLogEnabled,omitempty" tf:"text_log_enabled,omitempty"` -TextLogEnabled *bool `json:"textLogEnabled,omitempty" tf:"text_log_enabled,omitempty"` + TextLogLevel *string `json:"textLogLevel,omitempty" tf:"text_log_level,omitempty"` -TextLogLevel *string `json:"textLogLevel,omitempty" tf:"text_log_level,omitempty"` + TextLogRetentionSize *float64 `json:"textLogRetentionSize,omitempty" tf:"text_log_retention_size,omitempty"` -TextLogRetentionSize *float64 `json:"textLogRetentionSize,omitempty" tf:"text_log_retention_size,omitempty"` + TextLogRetentionTime *float64 `json:"textLogRetentionTime,omitempty" tf:"text_log_retention_time,omitempty"` -TextLogRetentionTime *float64 `json:"textLogRetentionTime,omitempty" tf:"text_log_retention_time,omitempty"` + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + TotalMemoryProfilerStep *float64 `json:"totalMemoryProfilerStep,omitempty" tf:"total_memory_profiler_step,omitempty"` -TotalMemoryProfilerStep *float64 `json:"totalMemoryProfilerStep,omitempty" tf:"total_memory_profiler_step,omitempty"` + TraceLogEnabled *bool `json:"traceLogEnabled,omitempty" tf:"trace_log_enabled,omitempty"` -TraceLogEnabled *bool `json:"traceLogEnabled,omitempty" tf:"trace_log_enabled,omitempty"` + TraceLogRetentionSize *float64 `json:"traceLogRetentionSize,omitempty" tf:"trace_log_retention_size,omitempty"` -TraceLogRetentionSize *float64 `json:"traceLogRetentionSize,omitempty" tf:"trace_log_retention_size,omitempty"` + TraceLogRetentionTime *float64 `json:"traceLogRetentionTime,omitempty" tf:"trace_log_retention_time,omitempty"` -TraceLogRetentionTime *float64 `json:"traceLogRetentionTime,omitempty" tf:"trace_log_retention_time,omitempty"` + UncompressedCacheSize *float64 `json:"uncompressedCacheSize,omitempty" tf:"uncompressed_cache_size,omitempty"` -UncompressedCacheSize *float64 `json:"uncompressedCacheSize,omitempty" tf:"uncompressed_cache_size,omitempty"` + ZookeeperLogEnabled *bool `json:"zookeeperLogEnabled,omitempty" tf:"zookeeper_log_enabled,omitempty"` -ZookeeperLogEnabled *bool `json:"zookeeperLogEnabled,omitempty" tf:"zookeeper_log_enabled,omitempty"` + ZookeeperLogRetentionSize *float64 `json:"zookeeperLogRetentionSize,omitempty" tf:"zookeeper_log_retention_size,omitempty"` -ZookeeperLogRetentionSize *float64 `json:"zookeeperLogRetentionSize,omitempty" tf:"zookeeper_log_retention_size,omitempty"` - -ZookeeperLogRetentionTime *float64 `json:"zookeeperLogRetentionTime,omitempty" tf:"zookeeper_log_retention_time,omitempty"` + ZookeeperLogRetentionTime *float64 `json:"zookeeperLogRetentionTime,omitempty" tf:"zookeeper_log_retention_time,omitempty"` } - type ConfigObservation struct { + 
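Editor's note (added example): ConfigInitParameters above mirrors the ClickHouse server settings block — log retention knobs, background pool sizes and engine integrations. A hedged sketch of setting a few scalar fields; values are illustrative and units are the server's, not verified here:

// Sketch: a handful of server-level settings from the struct above.
package main

import (
	"fmt"

	mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" // assumed path
)

func b(v bool) *bool       { return &v }
func f(v float64) *float64 { return &v }
func s(v string) *string   { return &v }

func main() {
	cfg := mdb.ConfigInitParameters{
		MaxConnections:   f(1024),
		KeepAliveTimeout: f(3),
		Timezone:         s("Europe/Moscow"),
		MetricLogEnabled: b(true),
	}
	fmt.Printf("%+v\n", cfg)
}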
-
 type ConfigObservation struct {
+	AsynchronousInsertLogEnabled *bool `json:"asynchronousInsertLogEnabled,omitempty" tf:"asynchronous_insert_log_enabled,omitempty"`
+	AsynchronousInsertLogRetentionSize *float64 `json:"asynchronousInsertLogRetentionSize,omitempty" tf:"asynchronous_insert_log_retention_size,omitempty"`
-AsynchronousInsertLogEnabled *bool `json:"asynchronousInsertLogEnabled,omitempty" tf:"asynchronous_insert_log_enabled,omitempty"`
-
-AsynchronousInsertLogRetentionSize *float64 `json:"asynchronousInsertLogRetentionSize,omitempty" tf:"asynchronous_insert_log_retention_size,omitempty"`
+	AsynchronousInsertLogRetentionTime *float64 `json:"asynchronousInsertLogRetentionTime,omitempty" tf:"asynchronous_insert_log_retention_time,omitempty"`
-AsynchronousInsertLogRetentionTime *float64 `json:"asynchronousInsertLogRetentionTime,omitempty" tf:"asynchronous_insert_log_retention_time,omitempty"`
+	AsynchronousMetricLogEnabled *bool `json:"asynchronousMetricLogEnabled,omitempty" tf:"asynchronous_metric_log_enabled,omitempty"`
-AsynchronousMetricLogEnabled *bool `json:"asynchronousMetricLogEnabled,omitempty" tf:"asynchronous_metric_log_enabled,omitempty"`
+	AsynchronousMetricLogRetentionSize *float64 `json:"asynchronousMetricLogRetentionSize,omitempty" tf:"asynchronous_metric_log_retention_size,omitempty"`
-AsynchronousMetricLogRetentionSize *float64 `json:"asynchronousMetricLogRetentionSize,omitempty" tf:"asynchronous_metric_log_retention_size,omitempty"`
+	AsynchronousMetricLogRetentionTime *float64 `json:"asynchronousMetricLogRetentionTime,omitempty" tf:"asynchronous_metric_log_retention_time,omitempty"`
-AsynchronousMetricLogRetentionTime *float64 `json:"asynchronousMetricLogRetentionTime,omitempty" tf:"asynchronous_metric_log_retention_time,omitempty"`
+	BackgroundBufferFlushSchedulePoolSize *float64 `json:"backgroundBufferFlushSchedulePoolSize,omitempty" tf:"background_buffer_flush_schedule_pool_size,omitempty"`
-BackgroundBufferFlushSchedulePoolSize *float64 `json:"backgroundBufferFlushSchedulePoolSize,omitempty" tf:"background_buffer_flush_schedule_pool_size,omitempty"`
+	BackgroundCommonPoolSize *float64 `json:"backgroundCommonPoolSize,omitempty" tf:"background_common_pool_size,omitempty"`
-BackgroundCommonPoolSize *float64 `json:"backgroundCommonPoolSize,omitempty" tf:"background_common_pool_size,omitempty"`
+	BackgroundDistributedSchedulePoolSize *float64 `json:"backgroundDistributedSchedulePoolSize,omitempty" tf:"background_distributed_schedule_pool_size,omitempty"`
-BackgroundDistributedSchedulePoolSize *float64 `json:"backgroundDistributedSchedulePoolSize,omitempty" tf:"background_distributed_schedule_pool_size,omitempty"`
+	BackgroundFetchesPoolSize *float64 `json:"backgroundFetchesPoolSize,omitempty" tf:"background_fetches_pool_size,omitempty"`
-BackgroundFetchesPoolSize *float64 `json:"backgroundFetchesPoolSize,omitempty" tf:"background_fetches_pool_size,omitempty"`
+	BackgroundMergesMutationsConcurrencyRatio *float64 `json:"backgroundMergesMutationsConcurrencyRatio,omitempty" tf:"background_merges_mutations_concurrency_ratio,omitempty"`
-BackgroundMergesMutationsConcurrencyRatio *float64 `json:"backgroundMergesMutationsConcurrencyRatio,omitempty" tf:"background_merges_mutations_concurrency_ratio,omitempty"`
+	BackgroundMessageBrokerSchedulePoolSize *float64 `json:"backgroundMessageBrokerSchedulePoolSize,omitempty" tf:"background_message_broker_schedule_pool_size,omitempty"`
-BackgroundMessageBrokerSchedulePoolSize *float64 `json:"backgroundMessageBrokerSchedulePoolSize,omitempty" tf:"background_message_broker_schedule_pool_size,omitempty"`
+	BackgroundMovePoolSize *float64 `json:"backgroundMovePoolSize,omitempty" tf:"background_move_pool_size,omitempty"`
-BackgroundMovePoolSize *float64 `json:"backgroundMovePoolSize,omitempty" tf:"background_move_pool_size,omitempty"`
+	BackgroundPoolSize *float64 `json:"backgroundPoolSize,omitempty" tf:"background_pool_size,omitempty"`
-BackgroundPoolSize *float64 `json:"backgroundPoolSize,omitempty" tf:"background_pool_size,omitempty"`
+	BackgroundSchedulePoolSize *float64 `json:"backgroundSchedulePoolSize,omitempty" tf:"background_schedule_pool_size,omitempty"`
-BackgroundSchedulePoolSize *float64 `json:"backgroundSchedulePoolSize,omitempty" tf:"background_schedule_pool_size,omitempty"`
+	// Data compression configuration. The structure is documented below.
+	Compression []CompressionObservation `json:"compression,omitempty" tf:"compression,omitempty"`
-// Data compression configuration. The structure is documented below.
-Compression []CompressionObservation `json:"compression,omitempty" tf:"compression,omitempty"`
+	// A database of the ClickHouse cluster. The structure is documented below.
+	DefaultDatabase *string `json:"defaultDatabase,omitempty" tf:"default_database,omitempty"`
-// A database of the ClickHouse cluster. The structure is documented below.
-DefaultDatabase *string `json:"defaultDatabase,omitempty" tf:"default_database,omitempty"`
+	DictionariesLazyLoad *bool `json:"dictionariesLazyLoad,omitempty" tf:"dictionaries_lazy_load,omitempty"`
-DictionariesLazyLoad *bool `json:"dictionariesLazyLoad,omitempty" tf:"dictionaries_lazy_load,omitempty"`
+	GeobaseEnabled *bool `json:"geobaseEnabled,omitempty" tf:"geobase_enabled,omitempty"`
-GeobaseEnabled *bool `json:"geobaseEnabled,omitempty" tf:"geobase_enabled,omitempty"`
+	GeobaseURI *string `json:"geobaseUri,omitempty" tf:"geobase_uri,omitempty"`
-GeobaseURI *string `json:"geobaseUri,omitempty" tf:"geobase_uri,omitempty"`
+	// Graphite rollup configuration. The structure is documented below.
+	GraphiteRollup []GraphiteRollupObservation `json:"graphiteRollup,omitempty" tf:"graphite_rollup,omitempty"`
-// Graphite rollup configuration. The structure is documented below.
-GraphiteRollup []GraphiteRollupObservation `json:"graphiteRollup,omitempty" tf:"graphite_rollup,omitempty"`
+	// Kafka connection configuration. The structure is documented below.
+	Kafka []KafkaObservation `json:"kafka,omitempty" tf:"kafka,omitempty"`
-// Kafka connection configuration. The structure is documented below.
-Kafka []KafkaObservation `json:"kafka,omitempty" tf:"kafka,omitempty"`
+	// Kafka topic connection configuration. The structure is documented below.
+	KafkaTopic []KafkaTopicObservation `json:"kafkaTopic,omitempty" tf:"kafka_topic,omitempty"`
-// Kafka topic connection configuration. The structure is documented below.
-KafkaTopic []KafkaTopicObservation `json:"kafkaTopic,omitempty" tf:"kafka_topic,omitempty"`
+	KeepAliveTimeout *float64 `json:"keepAliveTimeout,omitempty" tf:"keep_alive_timeout,omitempty"`
-KeepAliveTimeout *float64 `json:"keepAliveTimeout,omitempty" tf:"keep_alive_timeout,omitempty"`
+	// ClickHouse server parameters. For more information, see the official documentation.
+	LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"`
-// ClickHouse server parameters. For more information, see the official documentation.
-LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"`
+	MarkCacheSize *float64 `json:"markCacheSize,omitempty" tf:"mark_cache_size,omitempty"`
-MarkCacheSize *float64 `json:"markCacheSize,omitempty" tf:"mark_cache_size,omitempty"`
+	MaxConcurrentQueries *float64 `json:"maxConcurrentQueries,omitempty" tf:"max_concurrent_queries,omitempty"`
-MaxConcurrentQueries *float64 `json:"maxConcurrentQueries,omitempty" tf:"max_concurrent_queries,omitempty"`
+	MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"`
-MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"`
+	MaxPartitionSizeToDrop *float64 `json:"maxPartitionSizeToDrop,omitempty" tf:"max_partition_size_to_drop,omitempty"`
-MaxPartitionSizeToDrop *float64 `json:"maxPartitionSizeToDrop,omitempty" tf:"max_partition_size_to_drop,omitempty"`
+	MaxTableSizeToDrop *float64 `json:"maxTableSizeToDrop,omitempty" tf:"max_table_size_to_drop,omitempty"`
-MaxTableSizeToDrop *float64 `json:"maxTableSizeToDrop,omitempty" tf:"max_table_size_to_drop,omitempty"`
+	// MergeTree engine configuration. The structure is documented below.
+	MergeTree []MergeTreeObservation `json:"mergeTree,omitempty" tf:"merge_tree,omitempty"`
-// MergeTree engine configuration. The structure is documented below.
-MergeTree []MergeTreeObservation `json:"mergeTree,omitempty" tf:"merge_tree,omitempty"`
+	MetricLogEnabled *bool `json:"metricLogEnabled,omitempty" tf:"metric_log_enabled,omitempty"`
-MetricLogEnabled *bool `json:"metricLogEnabled,omitempty" tf:"metric_log_enabled,omitempty"`
+	MetricLogRetentionSize *float64 `json:"metricLogRetentionSize,omitempty" tf:"metric_log_retention_size,omitempty"`
-MetricLogRetentionSize *float64 `json:"metricLogRetentionSize,omitempty" tf:"metric_log_retention_size,omitempty"`
+	MetricLogRetentionTime *float64 `json:"metricLogRetentionTime,omitempty" tf:"metric_log_retention_time,omitempty"`
-MetricLogRetentionTime *float64 `json:"metricLogRetentionTime,omitempty" tf:"metric_log_retention_time,omitempty"`
+	OpentelemetrySpanLogEnabled *bool `json:"opentelemetrySpanLogEnabled,omitempty" tf:"opentelemetry_span_log_enabled,omitempty"`
-OpentelemetrySpanLogEnabled *bool `json:"opentelemetrySpanLogEnabled,omitempty" tf:"opentelemetry_span_log_enabled,omitempty"`
+	OpentelemetrySpanLogRetentionSize *float64 `json:"opentelemetrySpanLogRetentionSize,omitempty" tf:"opentelemetry_span_log_retention_size,omitempty"`
-OpentelemetrySpanLogRetentionSize *float64 `json:"opentelemetrySpanLogRetentionSize,omitempty" tf:"opentelemetry_span_log_retention_size,omitempty"`
+	OpentelemetrySpanLogRetentionTime *float64 `json:"opentelemetrySpanLogRetentionTime,omitempty" tf:"opentelemetry_span_log_retention_time,omitempty"`
-OpentelemetrySpanLogRetentionTime *float64 `json:"opentelemetrySpanLogRetentionTime,omitempty" tf:"opentelemetry_span_log_retention_time,omitempty"`
+	PartLogRetentionSize *float64 `json:"partLogRetentionSize,omitempty" tf:"part_log_retention_size,omitempty"`
-PartLogRetentionSize *float64 `json:"partLogRetentionSize,omitempty" tf:"part_log_retention_size,omitempty"`
+	PartLogRetentionTime *float64 `json:"partLogRetentionTime,omitempty" tf:"part_log_retention_time,omitempty"`
-PartLogRetentionTime *float64 `json:"partLogRetentionTime,omitempty" tf:"part_log_retention_time,omitempty"`
+	// Query cache configuration. The structure is documented below.
+	QueryCache []QueryCacheObservation `json:"queryCache,omitempty" tf:"query_cache,omitempty"`
-// Query cache configuration. The structure is documented below.
-QueryCache []QueryCacheObservation `json:"queryCache,omitempty" tf:"query_cache,omitempty"`
+	QueryLogRetentionSize *float64 `json:"queryLogRetentionSize,omitempty" tf:"query_log_retention_size,omitempty"`
-QueryLogRetentionSize *float64 `json:"queryLogRetentionSize,omitempty" tf:"query_log_retention_size,omitempty"`
+	QueryLogRetentionTime *float64 `json:"queryLogRetentionTime,omitempty" tf:"query_log_retention_time,omitempty"`
-QueryLogRetentionTime *float64 `json:"queryLogRetentionTime,omitempty" tf:"query_log_retention_time,omitempty"`
+	// Query masking rules configuration. The structure is documented below.
+	QueryMaskingRules []QueryMaskingRulesObservation `json:"queryMaskingRules,omitempty" tf:"query_masking_rules,omitempty"`
-// Query masking rules configuration. The structure is documented below.
-QueryMaskingRules []QueryMaskingRulesObservation `json:"queryMaskingRules,omitempty" tf:"query_masking_rules,omitempty"`
+	QueryThreadLogEnabled *bool `json:"queryThreadLogEnabled,omitempty" tf:"query_thread_log_enabled,omitempty"`
-QueryThreadLogEnabled *bool `json:"queryThreadLogEnabled,omitempty" tf:"query_thread_log_enabled,omitempty"`
+	QueryThreadLogRetentionSize *float64 `json:"queryThreadLogRetentionSize,omitempty" tf:"query_thread_log_retention_size,omitempty"`
-QueryThreadLogRetentionSize *float64 `json:"queryThreadLogRetentionSize,omitempty" tf:"query_thread_log_retention_size,omitempty"`
+	QueryThreadLogRetentionTime *float64 `json:"queryThreadLogRetentionTime,omitempty" tf:"query_thread_log_retention_time,omitempty"`
-QueryThreadLogRetentionTime *float64 `json:"queryThreadLogRetentionTime,omitempty" tf:"query_thread_log_retention_time,omitempty"`
+	QueryViewsLogEnabled *bool `json:"queryViewsLogEnabled,omitempty" tf:"query_views_log_enabled,omitempty"`
-QueryViewsLogEnabled *bool `json:"queryViewsLogEnabled,omitempty" tf:"query_views_log_enabled,omitempty"`
+	QueryViewsLogRetentionSize *float64 `json:"queryViewsLogRetentionSize,omitempty" tf:"query_views_log_retention_size,omitempty"`
-QueryViewsLogRetentionSize *float64 `json:"queryViewsLogRetentionSize,omitempty" tf:"query_views_log_retention_size,omitempty"`
+	QueryViewsLogRetentionTime *float64 `json:"queryViewsLogRetentionTime,omitempty" tf:"query_views_log_retention_time,omitempty"`
-QueryViewsLogRetentionTime *float64 `json:"queryViewsLogRetentionTime,omitempty" tf:"query_views_log_retention_time,omitempty"`
+	// RabbitMQ connection configuration. The structure is documented below.
+	Rabbitmq []RabbitmqObservation `json:"rabbitmq,omitempty" tf:"rabbitmq,omitempty"`
-// RabbitMQ connection configuration. The structure is documented below.
-Rabbitmq []RabbitmqObservation `json:"rabbitmq,omitempty" tf:"rabbitmq,omitempty"`
+	SessionLogEnabled *bool `json:"sessionLogEnabled,omitempty" tf:"session_log_enabled,omitempty"`
-SessionLogEnabled *bool `json:"sessionLogEnabled,omitempty" tf:"session_log_enabled,omitempty"`
+	SessionLogRetentionSize *float64 `json:"sessionLogRetentionSize,omitempty" tf:"session_log_retention_size,omitempty"`
-SessionLogRetentionSize *float64 `json:"sessionLogRetentionSize,omitempty" tf:"session_log_retention_size,omitempty"`
+	SessionLogRetentionTime *float64 `json:"sessionLogRetentionTime,omitempty" tf:"session_log_retention_time,omitempty"`
-SessionLogRetentionTime *float64 `json:"sessionLogRetentionTime,omitempty" tf:"session_log_retention_time,omitempty"`
+	TextLogEnabled *bool `json:"textLogEnabled,omitempty" tf:"text_log_enabled,omitempty"`
-TextLogEnabled *bool `json:"textLogEnabled,omitempty" tf:"text_log_enabled,omitempty"`
+	TextLogLevel *string `json:"textLogLevel,omitempty" tf:"text_log_level,omitempty"`
-TextLogLevel *string `json:"textLogLevel,omitempty" tf:"text_log_level,omitempty"`
+	TextLogRetentionSize *float64 `json:"textLogRetentionSize,omitempty" tf:"text_log_retention_size,omitempty"`
-TextLogRetentionSize *float64 `json:"textLogRetentionSize,omitempty" tf:"text_log_retention_size,omitempty"`
+	TextLogRetentionTime *float64 `json:"textLogRetentionTime,omitempty" tf:"text_log_retention_time,omitempty"`
-TextLogRetentionTime *float64 `json:"textLogRetentionTime,omitempty" tf:"text_log_retention_time,omitempty"`
+	Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"`
-Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"`
+	TotalMemoryProfilerStep *float64 `json:"totalMemoryProfilerStep,omitempty" tf:"total_memory_profiler_step,omitempty"`
-TotalMemoryProfilerStep *float64 `json:"totalMemoryProfilerStep,omitempty" tf:"total_memory_profiler_step,omitempty"`
+	TraceLogEnabled *bool `json:"traceLogEnabled,omitempty" tf:"trace_log_enabled,omitempty"`
-TraceLogEnabled *bool `json:"traceLogEnabled,omitempty" tf:"trace_log_enabled,omitempty"`
+	TraceLogRetentionSize *float64 `json:"traceLogRetentionSize,omitempty" tf:"trace_log_retention_size,omitempty"`
-TraceLogRetentionSize *float64 `json:"traceLogRetentionSize,omitempty" tf:"trace_log_retention_size,omitempty"`
+	TraceLogRetentionTime *float64 `json:"traceLogRetentionTime,omitempty" tf:"trace_log_retention_time,omitempty"`
-TraceLogRetentionTime *float64 `json:"traceLogRetentionTime,omitempty" tf:"trace_log_retention_time,omitempty"`
+	UncompressedCacheSize *float64 `json:"uncompressedCacheSize,omitempty" tf:"uncompressed_cache_size,omitempty"`
-UncompressedCacheSize *float64 `json:"uncompressedCacheSize,omitempty" tf:"uncompressed_cache_size,omitempty"`
+	ZookeeperLogEnabled *bool `json:"zookeeperLogEnabled,omitempty" tf:"zookeeper_log_enabled,omitempty"`
-ZookeeperLogEnabled *bool `json:"zookeeperLogEnabled,omitempty" tf:"zookeeper_log_enabled,omitempty"`
+	ZookeeperLogRetentionSize *float64 `json:"zookeeperLogRetentionSize,omitempty" tf:"zookeeper_log_retention_size,omitempty"`
-ZookeeperLogRetentionSize *float64 `json:"zookeeperLogRetentionSize,omitempty" tf:"zookeeper_log_retention_size,omitempty"`
-
-ZookeeperLogRetentionTime *float64 `json:"zookeeperLogRetentionTime,omitempty" tf:"zookeeper_log_retention_time,omitempty"`
+	ZookeeperLogRetentionTime *float64 `json:"zookeeperLogRetentionTime,omitempty" tf:"zookeeper_log_retention_time,omitempty"`
 }
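Editor's note (added example): ConfigObservation repeats the same fields without kubebuilder markers, since status is never validated as user input. That symmetry makes desired-versus-observed comparisons mechanical; a hedged sketch using DefaultDatabase, which appears in both variants above (package path assumed):

// Sketch: nil-safe drift check for one observed setting.
package main

import (
	"fmt"

	mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" // assumed path
)

// defaultDatabaseDrift reports whether the observed default_database differs
// from the desired value; nil means unset on either side.
func defaultDatabaseDrift(desired mdb.ConfigInitParameters, observed mdb.ConfigObservation) bool {
	switch {
	case desired.DefaultDatabase == nil && observed.DefaultDatabase == nil:
		return false
	case desired.DefaultDatabase == nil || observed.DefaultDatabase == nil:
		return true
	default:
		return *desired.DefaultDatabase != *observed.DefaultDatabase
	}
}

func main() {
	db := "analytics"
	fmt.Println(defaultDatabaseDrift(mdb.ConfigInitParameters{DefaultDatabase: &db}, mdb.ConfigObservation{}))
}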
+kubebuilder:validation:Optional + AsynchronousInsertLogEnabled *bool `json:"asynchronousInsertLogEnabled,omitempty" tf:"asynchronous_insert_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -AsynchronousInsertLogEnabled *bool `json:"asynchronousInsertLogEnabled,omitempty" tf:"asynchronous_insert_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + AsynchronousInsertLogRetentionSize *float64 `json:"asynchronousInsertLogRetentionSize,omitempty" tf:"asynchronous_insert_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -AsynchronousInsertLogRetentionSize *float64 `json:"asynchronousInsertLogRetentionSize,omitempty" tf:"asynchronous_insert_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + AsynchronousInsertLogRetentionTime *float64 `json:"asynchronousInsertLogRetentionTime,omitempty" tf:"asynchronous_insert_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -AsynchronousInsertLogRetentionTime *float64 `json:"asynchronousInsertLogRetentionTime,omitempty" tf:"asynchronous_insert_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + AsynchronousMetricLogEnabled *bool `json:"asynchronousMetricLogEnabled,omitempty" tf:"asynchronous_metric_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -AsynchronousMetricLogEnabled *bool `json:"asynchronousMetricLogEnabled,omitempty" tf:"asynchronous_metric_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + AsynchronousMetricLogRetentionSize *float64 `json:"asynchronousMetricLogRetentionSize,omitempty" tf:"asynchronous_metric_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -AsynchronousMetricLogRetentionSize *float64 `json:"asynchronousMetricLogRetentionSize,omitempty" tf:"asynchronous_metric_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + AsynchronousMetricLogRetentionTime *float64 `json:"asynchronousMetricLogRetentionTime,omitempty" tf:"asynchronous_metric_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -AsynchronousMetricLogRetentionTime *float64 `json:"asynchronousMetricLogRetentionTime,omitempty" tf:"asynchronous_metric_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + BackgroundBufferFlushSchedulePoolSize *float64 `json:"backgroundBufferFlushSchedulePoolSize,omitempty" tf:"background_buffer_flush_schedule_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundBufferFlushSchedulePoolSize *float64 `json:"backgroundBufferFlushSchedulePoolSize,omitempty" tf:"background_buffer_flush_schedule_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundCommonPoolSize *float64 `json:"backgroundCommonPoolSize,omitempty" tf:"background_common_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundCommonPoolSize *float64 `json:"backgroundCommonPoolSize,omitempty" tf:"background_common_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundDistributedSchedulePoolSize *float64 `json:"backgroundDistributedSchedulePoolSize,omitempty" tf:"background_distributed_schedule_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundDistributedSchedulePoolSize *float64 `json:"backgroundDistributedSchedulePoolSize,omitempty" tf:"background_distributed_schedule_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundFetchesPoolSize *float64 `json:"backgroundFetchesPoolSize,omitempty" tf:"background_fetches_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundFetchesPoolSize 
*float64 `json:"backgroundFetchesPoolSize,omitempty" tf:"background_fetches_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundMergesMutationsConcurrencyRatio *float64 `json:"backgroundMergesMutationsConcurrencyRatio,omitempty" tf:"background_merges_mutations_concurrency_ratio,omitempty"` -// +kubebuilder:validation:Optional -BackgroundMergesMutationsConcurrencyRatio *float64 `json:"backgroundMergesMutationsConcurrencyRatio,omitempty" tf:"background_merges_mutations_concurrency_ratio,omitempty"` + // +kubebuilder:validation:Optional + BackgroundMessageBrokerSchedulePoolSize *float64 `json:"backgroundMessageBrokerSchedulePoolSize,omitempty" tf:"background_message_broker_schedule_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundMessageBrokerSchedulePoolSize *float64 `json:"backgroundMessageBrokerSchedulePoolSize,omitempty" tf:"background_message_broker_schedule_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundMovePoolSize *float64 `json:"backgroundMovePoolSize,omitempty" tf:"background_move_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundMovePoolSize *float64 `json:"backgroundMovePoolSize,omitempty" tf:"background_move_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundPoolSize *float64 `json:"backgroundPoolSize,omitempty" tf:"background_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundPoolSize *float64 `json:"backgroundPoolSize,omitempty" tf:"background_pool_size,omitempty"` + // +kubebuilder:validation:Optional + BackgroundSchedulePoolSize *float64 `json:"backgroundSchedulePoolSize,omitempty" tf:"background_schedule_pool_size,omitempty"` -// +kubebuilder:validation:Optional -BackgroundSchedulePoolSize *float64 `json:"backgroundSchedulePoolSize,omitempty" tf:"background_schedule_pool_size,omitempty"` + // Data compression configuration. The structure is documented below. + // +kubebuilder:validation:Optional + Compression []CompressionParameters `json:"compression,omitempty" tf:"compression,omitempty"` -// Data compression configuration. The structure is documented below. -// +kubebuilder:validation:Optional -Compression []CompressionParameters `json:"compression,omitempty" tf:"compression,omitempty"` + // A database of the ClickHouse cluster. The structure is documented below. + // +kubebuilder:validation:Optional + DefaultDatabase *string `json:"defaultDatabase,omitempty" tf:"default_database,omitempty"` -// A database of the ClickHouse cluster. The structure is documented below. -// +kubebuilder:validation:Optional -DefaultDatabase *string `json:"defaultDatabase,omitempty" tf:"default_database,omitempty"` + // +kubebuilder:validation:Optional + DictionariesLazyLoad *bool `json:"dictionariesLazyLoad,omitempty" tf:"dictionaries_lazy_load,omitempty"` -// +kubebuilder:validation:Optional -DictionariesLazyLoad *bool `json:"dictionariesLazyLoad,omitempty" tf:"dictionaries_lazy_load,omitempty"` + // +kubebuilder:validation:Optional + GeobaseEnabled *bool `json:"geobaseEnabled,omitempty" tf:"geobase_enabled,omitempty"` -// +kubebuilder:validation:Optional -GeobaseEnabled *bool `json:"geobaseEnabled,omitempty" tf:"geobase_enabled,omitempty"` + // +kubebuilder:validation:Optional + GeobaseURI *string `json:"geobaseUri,omitempty" tf:"geobase_uri,omitempty"` -// +kubebuilder:validation:Optional -GeobaseURI *string `json:"geobaseUri,omitempty" tf:"geobase_uri,omitempty"` + // Graphite rollup configuration. The structure is documented below. 
+ // +kubebuilder:validation:Optional + GraphiteRollup []GraphiteRollupParameters `json:"graphiteRollup,omitempty" tf:"graphite_rollup,omitempty"` -// Graphite rollup configuration. The structure is documented below. -// +kubebuilder:validation:Optional -GraphiteRollup []GraphiteRollupParameters `json:"graphiteRollup,omitempty" tf:"graphite_rollup,omitempty"` + // Kafka connection configuration. The structure is documented below. + // +kubebuilder:validation:Optional + Kafka []KafkaParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` -// Kafka connection configuration. The structure is documented below. -// +kubebuilder:validation:Optional -Kafka []KafkaParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` + // Kafka topic connection configuration. The structure is documented below. + // +kubebuilder:validation:Optional + KafkaTopic []KafkaTopicParameters `json:"kafkaTopic,omitempty" tf:"kafka_topic,omitempty"` -// Kafka topic connection configuration. The structure is documented below. -// +kubebuilder:validation:Optional -KafkaTopic []KafkaTopicParameters `json:"kafkaTopic,omitempty" tf:"kafka_topic,omitempty"` + // +kubebuilder:validation:Optional + KeepAliveTimeout *float64 `json:"keepAliveTimeout,omitempty" tf:"keep_alive_timeout,omitempty"` -// +kubebuilder:validation:Optional -KeepAliveTimeout *float64 `json:"keepAliveTimeout,omitempty" tf:"keep_alive_timeout,omitempty"` + // ClickHouse server parameters. For more information, see the official documentation. + // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` -// ClickHouse server parameters. For more information, see the official documentation. -// +kubebuilder:validation:Optional -LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + // +kubebuilder:validation:Optional + MarkCacheSize *float64 `json:"markCacheSize,omitempty" tf:"mark_cache_size,omitempty"` -// +kubebuilder:validation:Optional -MarkCacheSize *float64 `json:"markCacheSize,omitempty" tf:"mark_cache_size,omitempty"` + // +kubebuilder:validation:Optional + MaxConcurrentQueries *float64 `json:"maxConcurrentQueries,omitempty" tf:"max_concurrent_queries,omitempty"` -// +kubebuilder:validation:Optional -MaxConcurrentQueries *float64 `json:"maxConcurrentQueries,omitempty" tf:"max_concurrent_queries,omitempty"` + // +kubebuilder:validation:Optional + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` -// +kubebuilder:validation:Optional -MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` + // +kubebuilder:validation:Optional + MaxPartitionSizeToDrop *float64 `json:"maxPartitionSizeToDrop,omitempty" tf:"max_partition_size_to_drop,omitempty"` -// +kubebuilder:validation:Optional -MaxPartitionSizeToDrop *float64 `json:"maxPartitionSizeToDrop,omitempty" tf:"max_partition_size_to_drop,omitempty"` + // +kubebuilder:validation:Optional + MaxTableSizeToDrop *float64 `json:"maxTableSizeToDrop,omitempty" tf:"max_table_size_to_drop,omitempty"` -// +kubebuilder:validation:Optional -MaxTableSizeToDrop *float64 `json:"maxTableSizeToDrop,omitempty" tf:"max_table_size_to_drop,omitempty"` + // MergeTree engine configuration. The structure is documented below. + // +kubebuilder:validation:Optional + MergeTree []MergeTreeParameters `json:"mergeTree,omitempty" tf:"merge_tree,omitempty"` -// MergeTree engine configuration. The structure is documented below. 
-// +kubebuilder:validation:Optional -MergeTree []MergeTreeParameters `json:"mergeTree,omitempty" tf:"merge_tree,omitempty"` + // +kubebuilder:validation:Optional + MetricLogEnabled *bool `json:"metricLogEnabled,omitempty" tf:"metric_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -MetricLogEnabled *bool `json:"metricLogEnabled,omitempty" tf:"metric_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + MetricLogRetentionSize *float64 `json:"metricLogRetentionSize,omitempty" tf:"metric_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -MetricLogRetentionSize *float64 `json:"metricLogRetentionSize,omitempty" tf:"metric_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + MetricLogRetentionTime *float64 `json:"metricLogRetentionTime,omitempty" tf:"metric_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -MetricLogRetentionTime *float64 `json:"metricLogRetentionTime,omitempty" tf:"metric_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + OpentelemetrySpanLogEnabled *bool `json:"opentelemetrySpanLogEnabled,omitempty" tf:"opentelemetry_span_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -OpentelemetrySpanLogEnabled *bool `json:"opentelemetrySpanLogEnabled,omitempty" tf:"opentelemetry_span_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + OpentelemetrySpanLogRetentionSize *float64 `json:"opentelemetrySpanLogRetentionSize,omitempty" tf:"opentelemetry_span_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -OpentelemetrySpanLogRetentionSize *float64 `json:"opentelemetrySpanLogRetentionSize,omitempty" tf:"opentelemetry_span_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + OpentelemetrySpanLogRetentionTime *float64 `json:"opentelemetrySpanLogRetentionTime,omitempty" tf:"opentelemetry_span_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -OpentelemetrySpanLogRetentionTime *float64 `json:"opentelemetrySpanLogRetentionTime,omitempty" tf:"opentelemetry_span_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + PartLogRetentionSize *float64 `json:"partLogRetentionSize,omitempty" tf:"part_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -PartLogRetentionSize *float64 `json:"partLogRetentionSize,omitempty" tf:"part_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + PartLogRetentionTime *float64 `json:"partLogRetentionTime,omitempty" tf:"part_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -PartLogRetentionTime *float64 `json:"partLogRetentionTime,omitempty" tf:"part_log_retention_time,omitempty"` + // Query cache configuration. The structure is documented below. + // +kubebuilder:validation:Optional + QueryCache []QueryCacheParameters `json:"queryCache,omitempty" tf:"query_cache,omitempty"` -// Query cache configuration. The structure is documented below. 
-// +kubebuilder:validation:Optional -QueryCache []QueryCacheParameters `json:"queryCache,omitempty" tf:"query_cache,omitempty"` + // +kubebuilder:validation:Optional + QueryLogRetentionSize *float64 `json:"queryLogRetentionSize,omitempty" tf:"query_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -QueryLogRetentionSize *float64 `json:"queryLogRetentionSize,omitempty" tf:"query_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + QueryLogRetentionTime *float64 `json:"queryLogRetentionTime,omitempty" tf:"query_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -QueryLogRetentionTime *float64 `json:"queryLogRetentionTime,omitempty" tf:"query_log_retention_time,omitempty"` + // Query masking rules configuration. The structure is documented below. + // +kubebuilder:validation:Optional + QueryMaskingRules []QueryMaskingRulesParameters `json:"queryMaskingRules,omitempty" tf:"query_masking_rules,omitempty"` -// Query masking rules configuration. The structure is documented below. -// +kubebuilder:validation:Optional -QueryMaskingRules []QueryMaskingRulesParameters `json:"queryMaskingRules,omitempty" tf:"query_masking_rules,omitempty"` + // +kubebuilder:validation:Optional + QueryThreadLogEnabled *bool `json:"queryThreadLogEnabled,omitempty" tf:"query_thread_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -QueryThreadLogEnabled *bool `json:"queryThreadLogEnabled,omitempty" tf:"query_thread_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + QueryThreadLogRetentionSize *float64 `json:"queryThreadLogRetentionSize,omitempty" tf:"query_thread_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -QueryThreadLogRetentionSize *float64 `json:"queryThreadLogRetentionSize,omitempty" tf:"query_thread_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + QueryThreadLogRetentionTime *float64 `json:"queryThreadLogRetentionTime,omitempty" tf:"query_thread_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -QueryThreadLogRetentionTime *float64 `json:"queryThreadLogRetentionTime,omitempty" tf:"query_thread_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + QueryViewsLogEnabled *bool `json:"queryViewsLogEnabled,omitempty" tf:"query_views_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -QueryViewsLogEnabled *bool `json:"queryViewsLogEnabled,omitempty" tf:"query_views_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + QueryViewsLogRetentionSize *float64 `json:"queryViewsLogRetentionSize,omitempty" tf:"query_views_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -QueryViewsLogRetentionSize *float64 `json:"queryViewsLogRetentionSize,omitempty" tf:"query_views_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + QueryViewsLogRetentionTime *float64 `json:"queryViewsLogRetentionTime,omitempty" tf:"query_views_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -QueryViewsLogRetentionTime *float64 `json:"queryViewsLogRetentionTime,omitempty" tf:"query_views_log_retention_time,omitempty"` + // RabbitMQ connection configuration. The structure is documented below. + // +kubebuilder:validation:Optional + Rabbitmq []RabbitmqParameters `json:"rabbitmq,omitempty" tf:"rabbitmq,omitempty"` -// RabbitMQ connection configuration. The structure is documented below. 
-// +kubebuilder:validation:Optional -Rabbitmq []RabbitmqParameters `json:"rabbitmq,omitempty" tf:"rabbitmq,omitempty"` + // +kubebuilder:validation:Optional + SessionLogEnabled *bool `json:"sessionLogEnabled,omitempty" tf:"session_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -SessionLogEnabled *bool `json:"sessionLogEnabled,omitempty" tf:"session_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + SessionLogRetentionSize *float64 `json:"sessionLogRetentionSize,omitempty" tf:"session_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -SessionLogRetentionSize *float64 `json:"sessionLogRetentionSize,omitempty" tf:"session_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + SessionLogRetentionTime *float64 `json:"sessionLogRetentionTime,omitempty" tf:"session_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -SessionLogRetentionTime *float64 `json:"sessionLogRetentionTime,omitempty" tf:"session_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + TextLogEnabled *bool `json:"textLogEnabled,omitempty" tf:"text_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -TextLogEnabled *bool `json:"textLogEnabled,omitempty" tf:"text_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + TextLogLevel *string `json:"textLogLevel,omitempty" tf:"text_log_level,omitempty"` -// +kubebuilder:validation:Optional -TextLogLevel *string `json:"textLogLevel,omitempty" tf:"text_log_level,omitempty"` + // +kubebuilder:validation:Optional + TextLogRetentionSize *float64 `json:"textLogRetentionSize,omitempty" tf:"text_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -TextLogRetentionSize *float64 `json:"textLogRetentionSize,omitempty" tf:"text_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + TextLogRetentionTime *float64 `json:"textLogRetentionTime,omitempty" tf:"text_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -TextLogRetentionTime *float64 `json:"textLogRetentionTime,omitempty" tf:"text_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` -// +kubebuilder:validation:Optional -Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + // +kubebuilder:validation:Optional + TotalMemoryProfilerStep *float64 `json:"totalMemoryProfilerStep,omitempty" tf:"total_memory_profiler_step,omitempty"` -// +kubebuilder:validation:Optional -TotalMemoryProfilerStep *float64 `json:"totalMemoryProfilerStep,omitempty" tf:"total_memory_profiler_step,omitempty"` + // +kubebuilder:validation:Optional + TraceLogEnabled *bool `json:"traceLogEnabled,omitempty" tf:"trace_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -TraceLogEnabled *bool `json:"traceLogEnabled,omitempty" tf:"trace_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + TraceLogRetentionSize *float64 `json:"traceLogRetentionSize,omitempty" tf:"trace_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -TraceLogRetentionSize *float64 `json:"traceLogRetentionSize,omitempty" tf:"trace_log_retention_size,omitempty"` + // +kubebuilder:validation:Optional + TraceLogRetentionTime *float64 `json:"traceLogRetentionTime,omitempty" tf:"trace_log_retention_time,omitempty"` -// +kubebuilder:validation:Optional -TraceLogRetentionTime *float64 `json:"traceLogRetentionTime,omitempty" tf:"trace_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + 
UncompressedCacheSize *float64 `json:"uncompressedCacheSize,omitempty" tf:"uncompressed_cache_size,omitempty"` -// +kubebuilder:validation:Optional -UncompressedCacheSize *float64 `json:"uncompressedCacheSize,omitempty" tf:"uncompressed_cache_size,omitempty"` + // +kubebuilder:validation:Optional + ZookeeperLogEnabled *bool `json:"zookeeperLogEnabled,omitempty" tf:"zookeeper_log_enabled,omitempty"` -// +kubebuilder:validation:Optional -ZookeeperLogEnabled *bool `json:"zookeeperLogEnabled,omitempty" tf:"zookeeper_log_enabled,omitempty"` + // +kubebuilder:validation:Optional + ZookeeperLogRetentionSize *float64 `json:"zookeeperLogRetentionSize,omitempty" tf:"zookeeper_log_retention_size,omitempty"` -// +kubebuilder:validation:Optional -ZookeeperLogRetentionSize *float64 `json:"zookeeperLogRetentionSize,omitempty" tf:"zookeeper_log_retention_size,omitempty"` - -// +kubebuilder:validation:Optional -ZookeeperLogRetentionTime *float64 `json:"zookeeperLogRetentionTime,omitempty" tf:"zookeeper_log_retention_time,omitempty"` + // +kubebuilder:validation:Optional + ZookeeperLogRetentionTime *float64 `json:"zookeeperLogRetentionTime,omitempty" tf:"zookeeper_log_retention_time,omitempty"` } - type DatabaseInitParameters struct { - -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type DatabaseObservation struct { - -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type DatabaseParameters struct { - -// The name of the database. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` } - type FormatSchemaInitParameters struct { + // The name of the format schema. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the format schema. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Type of the format schema. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type of the format schema. -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Format schema file URL. You can only use format schemas stored in Yandex Object Storage. -URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + // Format schema file URL. You can only use format schemas stored in Yandex Object Storage. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } - type FormatSchemaObservation struct { + // The name of the format schema. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the format schema. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// Type of the format schema. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of the format schema. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Format schema file URL. You can only use format schemas stored in Yandex Object Storage. -URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + // Format schema file URL. You can only use format schemas stored in Yandex Object Storage. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } - type FormatSchemaParameters struct { + // The name of the format schema. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// The name of the format schema. 
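ConfigParameters mirrors ConfigObservation field for field, with every member marked +kubebuilder:validation:Optional so a spec may set any subset of the cluster config. A minimal sketch of populating it in Go, assuming the generated types are importable from this provider's ClickHouse v1alpha1 package (the import path below and the millisecond interpretation of the retention-time fields are assumptions, not something this diff confirms):

package main

import (
	"fmt"

	// Assumed location of the generated ClickHouse cluster types.
	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
)

func strp(s string) *string   { return &s }
func boolp(b bool) *bool      { return &b }
func f64p(f float64) *float64 { return &f }

func main() {
	cfg := v1alpha1.ConfigParameters{
		LogLevel:               strp("INFO"),
		MaxConnections:         f64p(1024),
		MetricLogEnabled:       boolp(true),
		MetricLogRetentionSize: f64p(536870912),  // 512 MiB
		MetricLogRetentionTime: f64p(2592000000), // 30 days, assuming milliseconds
		BackgroundPoolSize:     f64p(16),
	}
	fmt.Println(*cfg.LogLevel, *cfg.MaxConnections)
}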
-
 type DatabaseInitParameters struct {
-
-// The name of the database.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the database.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 }
-
 type DatabaseObservation struct {
-
-// The name of the database.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the database.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 }
-
 type DatabaseParameters struct {
-
-// The name of the database.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+	// The name of the database.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 }
-
 type FormatSchemaInitParameters struct {
+	// The name of the format schema.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the format schema.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Type of the format schema.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// Type of the format schema.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
-
-// Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
-URI *string `json:"uri,omitempty" tf:"uri,omitempty"`
+	// Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
+	URI *string `json:"uri,omitempty" tf:"uri,omitempty"`
 }
-
 type FormatSchemaObservation struct {
+	// The name of the format schema.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the format schema.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Type of the format schema.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of the format schema.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
-URI *string `json:"uri,omitempty" tf:"uri,omitempty"`
+	// Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
+	URI *string `json:"uri,omitempty" tf:"uri,omitempty"`
 }
-
 type FormatSchemaParameters struct {
+	// The name of the format schema.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
-// The name of the format schema.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// Type of the format schema.
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+	// Type of the format schema.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
-// Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
-// +kubebuilder:validation:Optional
-URI *string `json:"uri" tf:"uri,omitempty"`
+	// Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
+	// +kubebuilder:validation:Optional
+	URI *string `json:"uri" tf:"uri,omitempty"`
 }
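Format schema files must already live in Yandex Object Storage; URI points at the stored file, and in FormatSchemaParameters all three fields are required (their json tags carry no omitempty). A hedged sketch reusing the strp helper and the assumed v1alpha1 import from the first example; the bucket URL and the type string are illustrative assumptions:

schema := v1alpha1.FormatSchemaParameters{
	Name: strp("events_proto"),
	Type: strp("FORMAT_SCHEMA_TYPE_PROTOBUF"), // assumed enum value, not confirmed by this diff
	URI:  strp("https://storage.yandexcloud.net/my-bucket/events.proto"),
}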
-
 type GraphiteRollupInitParameters struct {
+	// Graphite rollup configuration name.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// The name of the column storing the metric name (Graphite sensor). Default value: Path.
-PathColumnName *string `json:"pathColumnName,omitempty" tf:"path_column_name,omitempty"`
+	// The name of the column storing the metric name (Graphite sensor). Default value: Path.
+	PathColumnName *string `json:"pathColumnName,omitempty" tf:"path_column_name,omitempty"`
-// Set of thinning rules.
-Pattern []PatternInitParameters `json:"pattern,omitempty" tf:"pattern,omitempty"`
+	// Set of thinning rules.
+	Pattern []PatternInitParameters `json:"pattern,omitempty" tf:"pattern,omitempty"`
-// The name of the column storing the time of measuring the metric. Default value: Time.
-TimeColumnName *string `json:"timeColumnName,omitempty" tf:"time_column_name,omitempty"`
+	// The name of the column storing the time of measuring the metric. Default value: Time.
+	TimeColumnName *string `json:"timeColumnName,omitempty" tf:"time_column_name,omitempty"`
-// The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.
-ValueColumnName *string `json:"valueColumnName,omitempty" tf:"value_column_name,omitempty"`
+	// The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.
+	ValueColumnName *string `json:"valueColumnName,omitempty" tf:"value_column_name,omitempty"`
-// The name of the column storing the version of the metric. Default value: Timestamp.
-VersionColumnName *string `json:"versionColumnName,omitempty" tf:"version_column_name,omitempty"`
+	// The name of the column storing the version of the metric. Default value: Timestamp.
+	VersionColumnName *string `json:"versionColumnName,omitempty" tf:"version_column_name,omitempty"`
 }
-
 type GraphiteRollupObservation struct {
+	// Graphite rollup configuration name.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// The name of the column storing the metric name (Graphite sensor). Default value: Path.
-PathColumnName *string `json:"pathColumnName,omitempty" tf:"path_column_name,omitempty"`
+	// The name of the column storing the metric name (Graphite sensor). Default value: Path.
+	PathColumnName *string `json:"pathColumnName,omitempty" tf:"path_column_name,omitempty"`
-// Set of thinning rules.
-Pattern []PatternObservation `json:"pattern,omitempty" tf:"pattern,omitempty"`
+	// Set of thinning rules.
+	Pattern []PatternObservation `json:"pattern,omitempty" tf:"pattern,omitempty"`
-// The name of the column storing the time of measuring the metric. Default value: Time.
-TimeColumnName *string `json:"timeColumnName,omitempty" tf:"time_column_name,omitempty"`
+	// The name of the column storing the time of measuring the metric. Default value: Time.
+	TimeColumnName *string `json:"timeColumnName,omitempty" tf:"time_column_name,omitempty"`
-// The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.
-ValueColumnName *string `json:"valueColumnName,omitempty" tf:"value_column_name,omitempty"`
+	// The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.
+	ValueColumnName *string `json:"valueColumnName,omitempty" tf:"value_column_name,omitempty"`
-// The name of the column storing the version of the metric. Default value: Timestamp.
-VersionColumnName *string `json:"versionColumnName,omitempty" tf:"version_column_name,omitempty"`
+	// The name of the column storing the version of the metric. Default value: Timestamp.
+	VersionColumnName *string `json:"versionColumnName,omitempty" tf:"version_column_name,omitempty"`
 }
-
 type GraphiteRollupParameters struct {
+	// Graphite rollup configuration name.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+	// The name of the column storing the metric name (Graphite sensor). Default value: Path.
+	// +kubebuilder:validation:Optional
+	PathColumnName *string `json:"pathColumnName,omitempty" tf:"path_column_name,omitempty"`
-// The name of the column storing the metric name (Graphite sensor). Default value: Path.
-// +kubebuilder:validation:Optional
-PathColumnName *string `json:"pathColumnName,omitempty" tf:"path_column_name,omitempty"`
+	// Set of thinning rules.
+	// +kubebuilder:validation:Optional
+	Pattern []PatternParameters `json:"pattern,omitempty" tf:"pattern,omitempty"`
-// Set of thinning rules.
-// +kubebuilder:validation:Optional
-Pattern []PatternParameters `json:"pattern,omitempty" tf:"pattern,omitempty"`
+	// The name of the column storing the time of measuring the metric. Default value: Time.
+	// +kubebuilder:validation:Optional
+	TimeColumnName *string `json:"timeColumnName,omitempty" tf:"time_column_name,omitempty"`
-// The name of the column storing the time of measuring the metric. Default value: Time.
-// +kubebuilder:validation:Optional
-TimeColumnName *string `json:"timeColumnName,omitempty" tf:"time_column_name,omitempty"`
+	// The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.
+	// +kubebuilder:validation:Optional
+	ValueColumnName *string `json:"valueColumnName,omitempty" tf:"value_column_name,omitempty"`
-// The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.
-// +kubebuilder:validation:Optional
-ValueColumnName *string `json:"valueColumnName,omitempty" tf:"value_column_name,omitempty"`
-
-// The name of the column storing the version of the metric. Default value: Timestamp.
-// +kubebuilder:validation:Optional
-VersionColumnName *string `json:"versionColumnName,omitempty" tf:"version_column_name,omitempty"`
+	// The name of the column storing the version of the metric. Default value: Timestamp.
+	// +kubebuilder:validation:Optional
+	VersionColumnName *string `json:"versionColumnName,omitempty" tf:"version_column_name,omitempty"`
 }
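The graphite rollup block only renames the columns that the rollup reads, with the documented defaults being Path, Time, Value and Timestamp. A sketch reusing the helpers and assumed import from the first example; the pattern fields live in PatternParameters, defined elsewhere in this file, so only an empty placeholder slice is shown:

rollup := v1alpha1.GraphiteRollupParameters{
	Name:              strp("rollup_default"),
	PathColumnName:    strp("Path"), // these four spell out the documented defaults
	TimeColumnName:    strp("Time"),
	ValueColumnName:   strp("Value"),
	VersionColumnName: strp("Timestamp"),
	Pattern:           []v1alpha1.PatternParameters{}, // thinning rules; structure defined later in this file
}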
-
 type HostInitParameters struct {
+	// Sets whether the host should get a public IP address on creation. Can be either true or false.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-// Sets whether the host should get a public IP address on creation. Can be either true or false.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-
-// The name of the shard to which the host belongs.
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// The name of the shard to which the host belongs.
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
-// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-// The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// The availability zone where the ClickHouse host will be created. For more information see the official documentation.
-Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+	// The availability zone where the ClickHouse host will be created. For more information see the official documentation.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
 }
-
 type HostObservation struct {
+	// Sets whether the host should get a public IP address on creation. Can be either true or false.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-// Sets whether the host should get a public IP address on creation. Can be either true or false.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-
-// (Computed) The fully qualified domain name of the host.
-Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+	// (Computed) The fully qualified domain name of the host.
+	Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
-// The name of the shard to which the host belongs.
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// The name of the shard to which the host belongs.
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
-// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// The availability zone where the ClickHouse host will be created. For more information see the official documentation.
-Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+	// The availability zone where the ClickHouse host will be created. For more information see the official documentation.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
 }
-
 type HostParameters struct {
+	// Sets whether the host should get a public IP address on creation. Can be either true or false.
+	// +kubebuilder:validation:Optional
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-// Sets whether the host should get a public IP address on creation. Can be either true or false.
-// +kubebuilder:validation:Optional
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// The name of the shard to which the host belongs.
+	// +kubebuilder:validation:Optional
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
-// The name of the shard to which the host belongs.
-// +kubebuilder:validation:Optional
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
-// The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
-
-// The availability zone where the ClickHouse host will be created. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-Zone *string `json:"zone" tf:"zone,omitempty"`
+	// The availability zone where the ClickHouse host will be created. For more information see the official documentation.
+	// +kubebuilder:validation:Optional
+	Zone *string `json:"zone" tf:"zone,omitempty"`
 }
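A host is either a CLICKHOUSE or a ZOOKEEPER node, and its subnet can be set directly via SubnetID or left to crossplane's reference resolver through SubnetIDRef/SubnetIDSelector, whose tf:"-" tags keep them out of the Terraform payload. A sketch reusing the helpers and assumed import from the first example; v1 here is the crossplane-runtime common v1 alias this generated file already uses:

host := v1alpha1.HostParameters{
	Type:           strp("CLICKHOUSE"), // or "ZOOKEEPER"
	Zone:           strp("ru-central1-a"),
	ShardName:      strp("shard1"),
	AssignPublicIP: boolp(false),
	// Resolved to subnetId at runtime by the crossplane reference resolver.
	SubnetIDRef: &v1.Reference{Name: "cluster-subnet"},
}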
-
 type KafkaInitParameters struct {
+	// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
+	AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
-AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
+	// A comma-separated list of debug contexts to enable.
+	Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
-// A comma-separated list of debug contexts to enable.
-Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
+	// Enable verification of SSL certificates.
+	EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
-// enable verification of SSL certificates.
-EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
+	// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+	MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
-// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
-MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
+	// SASL mechanism used in kafka authentication.
+	SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// SASL mechanism used in kafka authentication.
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+	// User password on kafka server.
+	SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
-// User password on kafka server.
-SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
+	// Username on kafka server.
+	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Username on kafka server.
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
+	// Security protocol used to connect to kafka server.
+	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-// Security protocol used to connect to kafka server.
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-
-// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no hearts are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
-SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
+	// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+	SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
 }
-
 type KafkaObservation struct {
+	// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
+	AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
-AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
+	// A comma-separated list of debug contexts to enable.
+	Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
-// A comma-separated list of debug contexts to enable.
-Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
+	// Enable verification of SSL certificates.
+	EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
-// enable verification of SSL certificates.
-EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
+	// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+	MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
-// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
-MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
+	// SASL mechanism used in kafka authentication.
+	SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// SASL mechanism used in kafka authentication.
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+	// Username on kafka server.
+	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Username on kafka server.
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
+	// Security protocol used to connect to kafka server.
+	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-// Security protocol used to connect to kafka server.
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-
-// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no hearts are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
-SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
+	// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+	SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
 }
-
 type KafkaParameters struct {
+	// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
+	// +kubebuilder:validation:Optional
+	AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
-// +kubebuilder:validation:Optional
-AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-
-// A comma-separated list of debug contexts to enable.
-// +kubebuilder:validation:Optional
-Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
+	// A comma-separated list of debug contexts to enable.
+	// +kubebuilder:validation:Optional
+	Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
-// enable verification of SSL certificates.
-// +kubebuilder:validation:Optional
-EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
+	// Enable verification of SSL certificates.
+	// +kubebuilder:validation:Optional
+	EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
-// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
-// +kubebuilder:validation:Optional
-MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
+	// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+	// +kubebuilder:validation:Optional
+	MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
-// SASL mechanism used in kafka authentication.
-// +kubebuilder:validation:Optional
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+	// SASL mechanism used in kafka authentication.
+	// +kubebuilder:validation:Optional
+	SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// User password on kafka server.
-// +kubebuilder:validation:Optional
-SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
+	// User password on kafka server.
+	// +kubebuilder:validation:Optional
+	SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
-// Username on kafka server.
-// +kubebuilder:validation:Optional
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
+	// Username on kafka server.
+	// +kubebuilder:validation:Optional
+	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Security protocol used to connect to kafka server.
-// +kubebuilder:validation:Optional
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
+	// Security protocol used to connect to kafka server.
+	// +kubebuilder:validation:Optional
+	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no hearts are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
-// +kubebuilder:validation:Optional
-SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
+	// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+	// +kubebuilder:validation:Optional
+	SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
 }
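Note that the SASL password never appears in the spec in plaintext: SaslPasswordSecretRef (tf:"-") pulls it from a Kubernetes secret, and it has no counterpart in KafkaObservation. A sketch reusing the helpers and assumed import from the first example, with the SASL values and secret name purely illustrative:

kafka := v1alpha1.KafkaParameters{
	SecurityProtocol: strp("SASL_SSL"),
	SaslMechanism:    strp("SCRAM-SHA-512"),
	SaslUsername:     strp("reader"),
	SaslPasswordSecretRef: &v1.SecretKeySelector{
		SecretReference: v1.SecretReference{Name: "kafka-credentials", Namespace: "crossplane-system"},
		Key:             "password",
	},
	AutoOffsetReset:  strp("earliest"), // per the comment above: smallest/earliest, largest/latest, or error
	SessionTimeoutMs: f64p(45000),
}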
The list is documented below. -Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` + // Custom settings for user. The list is documented below. + Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` } - type KafkaTopicObservation struct { + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the user. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// Custom settings for user. The list is documented below. -Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` + // Custom settings for user. The list is documented below. + Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` } - type KafkaTopicParameters struct { + // The name of the user. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// The name of the user. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` - -// Custom settings for user. The list is documented below. -// +kubebuilder:validation:Optional -Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` + // Custom settings for user. The list is documented below. + // +kubebuilder:validation:Optional + Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` } - type MLModelInitParameters struct { + // The name of the ml model. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the ml model. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// Type of the model. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of the model. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Model file URL. You can only use models stored in Yandex Object Storage. -URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + // Model file URL. You can only use models stored in Yandex Object Storage. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } - type MLModelObservation struct { + // The name of the ml model. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the ml model. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Type of the model. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type of the model. -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Model file URL. You can only use models stored in Yandex Object Storage. -URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + // Model file URL. You can only use models stored in Yandex Object Storage. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` } - type MLModelParameters struct { + // The name of the ml model. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// The name of the ml model. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` - -// Type of the model. -// +kubebuilder:validation:Optional -Type *string `json:"type" tf:"type,omitempty"` + // Type of the model. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` -// Model file URL. You can only use models stored in Yandex Object Storage. -// +kubebuilder:validation:Optional -URI *string `json:"uri" tf:"uri,omitempty"` + // Model file URL. You can only use models stored in Yandex Object Storage. 
+ // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` } - type MaintenanceWindowInitParameters struct { + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type MaintenanceWindowObservation struct { + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. -Day *string `json:"day,omitempty" tf:"day,omitempty"` + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` - -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type MaintenanceWindowParameters struct { + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + // +kubebuilder:validation:Optional + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. -// +kubebuilder:validation:Optional -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. -// +kubebuilder:validation:Optional -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -// +kubebuilder:validation:Optional -Type *string `json:"type" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. 
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
 }
-
 type MergeTreeInitParameters struct {
+	// When this setting has a value greater than zero only a single replica starts the merge immediately if merged part on shared storage and allow_remote_fs_zero_copy_replication is enabled.
+	AllowRemoteFsZeroCopyReplication *bool `json:"allowRemoteFsZeroCopyReplication,omitempty" tf:"allow_remote_fs_zero_copy_replication,omitempty"`
-// When this setting has a value greater than zero only a single replica starts the merge immediately if merged part on shared storage and allow_remote_fs_zero_copy_replication is enabled.
-AllowRemoteFsZeroCopyReplication *bool `json:"allowRemoteFsZeroCopyReplication,omitempty" tf:"allow_remote_fs_zero_copy_replication,omitempty"`
-
-// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.
-CheckSampleColumnIsCorrect *bool `json:"checkSampleColumnIsCorrect,omitempty" tf:"check_sample_column_is_correct,omitempty"`
+	// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.
+	CheckSampleColumnIsCorrect *bool `json:"checkSampleColumnIsCorrect,omitempty" tf:"check_sample_column_is_correct,omitempty"`
-// Minimum period to clean old queue logs, blocks hashes and parts.
-CleanupDelayPeriod *float64 `json:"cleanupDelayPeriod,omitempty" tf:"cleanup_delay_period,omitempty"`
+	// Minimum period to clean old queue logs, blocks hashes and parts.
+	CleanupDelayPeriod *float64 `json:"cleanupDelayPeriod,omitempty" tf:"cleanup_delay_period,omitempty"`
-// If the number of inactive parts in a single partition in the table at least that many the inactive_parts_to_delay_insert value, an INSERT artificially slows down. It is useful when a server fails to clean up parts quickly enough.
-InactivePartsToDelayInsert *float64 `json:"inactivePartsToDelayInsert,omitempty" tf:"inactive_parts_to_delay_insert,omitempty"`
+	// If the number of inactive parts in a single partition in the table is at least the inactive_parts_to_delay_insert value, an INSERT artificially slows down. It is useful when a server fails to clean up parts quickly enough.
+	InactivePartsToDelayInsert *float64 `json:"inactivePartsToDelayInsert,omitempty" tf:"inactive_parts_to_delay_insert,omitempty"`
-// If the number of inactive parts in a single partition more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
-InactivePartsToThrowInsert *float64 `json:"inactivePartsToThrowInsert,omitempty" tf:"inactive_parts_to_throw_insert,omitempty"`
+	// If the number of inactive parts in a single partition is more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
+	InactivePartsToThrowInsert *float64 `json:"inactivePartsToThrowInsert,omitempty" tf:"inactive_parts_to_throw_insert,omitempty"`
-// The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
-MaxAvgPartSizeForTooManyParts *float64 `json:"maxAvgPartSizeForTooManyParts,omitempty" tf:"max_avg_part_size_for_too_many_parts,omitempty"`
+	// The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed nor rejected. This makes it possible to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
+	MaxAvgPartSizeForTooManyParts *float64 `json:"maxAvgPartSizeForTooManyParts,omitempty" tf:"max_avg_part_size_for_too_many_parts,omitempty"`
-// The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool -- roughly corresponds to the maximum possible part size created by an automatic background merge.
-MaxBytesToMergeAtMaxSpaceInPool *float64 `json:"maxBytesToMergeAtMaxSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_max_space_in_pool,omitempty"`
+	// The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool -- roughly corresponds to the maximum possible part size created by an automatic background merge.
+	MaxBytesToMergeAtMaxSpaceInPool *float64 `json:"maxBytesToMergeAtMaxSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_max_space_in_pool,omitempty"`
-// Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
-MaxBytesToMergeAtMinSpaceInPool *float64 `json:"maxBytesToMergeAtMinSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_min_space_in_pool,omitempty"`
+	// Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
+	MaxBytesToMergeAtMinSpaceInPool *float64 `json:"maxBytesToMergeAtMinSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_min_space_in_pool,omitempty"`
-// Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.
-MaxCleanupDelayPeriod *float64 `json:"maxCleanupDelayPeriod,omitempty" tf:"max_cleanup_delay_period,omitempty"`
+	// Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.
+	MaxCleanupDelayPeriod *float64 `json:"maxCleanupDelayPeriod,omitempty" tf:"max_cleanup_delay_period,omitempty"`
-// Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).
-MaxMergeSelectingSleepMs *float64 `json:"maxMergeSelectingSleepMs,omitempty" tf:"max_merge_selecting_sleep_ms,omitempty"`
+	// Maximum sleep time for merge selecting; a lower setting will trigger selecting tasks in background_schedule_pool frequently, which results in a large amount of requests to ZooKeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).
+	MaxMergeSelectingSleepMs *float64 `json:"maxMergeSelectingSleepMs,omitempty" tf:"max_merge_selecting_sleep_ms,omitempty"`
-// When there is more than specified number of merges with TTL entries in pool, do not assign new merge with TTL.
-MaxNumberOfMergesWithTTLInPool *float64 `json:"maxNumberOfMergesWithTtlInPool,omitempty" tf:"max_number_of_merges_with_ttl_in_pool,omitempty"`
+	// When there are more than the specified number of merges with TTL entries in the pool, do not assign a new merge with TTL.
+	MaxNumberOfMergesWithTTLInPool *float64 `json:"maxNumberOfMergesWithTtlInPool,omitempty" tf:"max_number_of_merges_with_ttl_in_pool,omitempty"`
-// Maximum number of parts in all partitions.
-MaxPartsInTotal *float64 `json:"maxPartsInTotal,omitempty" tf:"max_parts_in_total,omitempty"`
+	// Maximum number of parts in all partitions.
+	MaxPartsInTotal *float64 `json:"maxPartsInTotal,omitempty" tf:"max_parts_in_total,omitempty"`
-// Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
-MaxReplicatedMergesInQueue *float64 `json:"maxReplicatedMergesInQueue,omitempty" tf:"max_replicated_merges_in_queue,omitempty"`
+	// Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
+	MaxReplicatedMergesInQueue *float64 `json:"maxReplicatedMergesInQueue,omitempty" tf:"max_replicated_merges_in_queue,omitempty"`
-// The number of rows that are read from the merged parts into memory. Default value: 8192.
-MergeMaxBlockSize *float64 `json:"mergeMaxBlockSize,omitempty" tf:"merge_max_block_size,omitempty"`
+	// The number of rows that are read from the merged parts into memory. Default value: 8192.
+	MergeMaxBlockSize *float64 `json:"mergeMaxBlockSize,omitempty" tf:"merge_max_block_size,omitempty"`
-// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
-MergeSelectingSleepMs *float64 `json:"mergeSelectingSleepMs,omitempty" tf:"merge_selecting_sleep_ms,omitempty"`
+	// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+	MergeSelectingSleepMs *float64 `json:"mergeSelectingSleepMs,omitempty" tf:"merge_selecting_sleep_ms,omitempty"`
-// Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).
-MergeWithRecompressionTTLTimeout *float64 `json:"mergeWithRecompressionTtlTimeout,omitempty" tf:"merge_with_recompression_ttl_timeout,omitempty"`
+	// Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).
+	MergeWithRecompressionTTLTimeout *float64 `json:"mergeWithRecompressionTtlTimeout,omitempty" tf:"merge_with_recompression_ttl_timeout,omitempty"`
-// Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).
-MergeWithTTLTimeout *float64 `json:"mergeWithTtlTimeout,omitempty" tf:"merge_with_ttl_timeout,omitempty"`
+	// Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).
+	MergeWithTTLTimeout *float64 `json:"mergeWithTtlTimeout,omitempty" tf:"merge_with_ttl_timeout,omitempty"`
-// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.
-MinAgeToForceMergeOnPartitionOnly *bool `json:"minAgeToForceMergeOnPartitionOnly,omitempty" tf:"min_age_to_force_merge_on_partition_only,omitempty"`
+	// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on a subset.
+	MinAgeToForceMergeOnPartitionOnly *bool `json:"minAgeToForceMergeOnPartitionOnly,omitempty" tf:"min_age_to_force_merge_on_partition_only,omitempty"`
-// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
-MinAgeToForceMergeSeconds *float64 `json:"minAgeToForceMergeSeconds,omitempty" tf:"min_age_to_force_merge_seconds,omitempty"`
+	// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
+	MinAgeToForceMergeSeconds *float64 `json:"minAgeToForceMergeSeconds,omitempty" tf:"min_age_to_force_merge_seconds,omitempty"`
-// Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
-MinBytesForWidePart *float64 `json:"minBytesForWidePart,omitempty" tf:"min_bytes_for_wide_part,omitempty"`
+	// Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+	MinBytesForWidePart *float64 `json:"minBytesForWidePart,omitempty" tf:"min_bytes_for_wide_part,omitempty"`
-// Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
-MinRowsForWidePart *float64 `json:"minRowsForWidePart,omitempty" tf:"min_rows_for_wide_part,omitempty"`
+	// Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+	MinRowsForWidePart *float64 `json:"minRowsForWidePart,omitempty" tf:"min_rows_for_wide_part,omitempty"`
-// When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.
-NumberOfFreeEntriesInPoolToExecuteMutation *float64 `json:"numberOfFreeEntriesInPoolToExecuteMutation,omitempty" tf:"number_of_free_entries_in_pool_to_execute_mutation,omitempty"`
+	// When there are fewer than the specified number of free entries in the pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.
+	NumberOfFreeEntriesInPoolToExecuteMutation *float64 `json:"numberOfFreeEntriesInPoolToExecuteMutation,omitempty" tf:"number_of_free_entries_in_pool_to_execute_mutation,omitempty"`
-// Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
-NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge *float64 `json:"numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge,omitempty" tf:"number_of_free_entries_in_pool_to_lower_max_size_of_merge,omitempty"`
+	// Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
+	NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge *float64 `json:"numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge,omitempty" tf:"number_of_free_entries_in_pool_to_lower_max_size_of_merge,omitempty"`
-// Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts artificially reduce the rate of inserting data into the table.
-PartsToDelayInsert *float64 `json:"partsToDelayInsert,omitempty" tf:"parts_to_delay_insert,omitempty"`
+	// Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
+	PartsToDelayInsert *float64 `json:"partsToDelayInsert,omitempty" tf:"parts_to_delay_insert,omitempty"`
-// Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
-PartsToThrowInsert *float64 `json:"partsToThrowInsert,omitempty" tf:"parts_to_throw_insert,omitempty"`
+	// Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
+	PartsToThrowInsert *float64 `json:"partsToThrowInsert,omitempty" tf:"parts_to_throw_insert,omitempty"`
-// Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
-ReplicatedDeduplicationWindow *float64 `json:"replicatedDeduplicationWindow,omitempty" tf:"replicated_deduplication_window,omitempty"`
+	// Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
+	ReplicatedDeduplicationWindow *float64 `json:"replicatedDeduplicationWindow,omitempty" tf:"replicated_deduplication_window,omitempty"`
-// Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones wil be deleted).
-ReplicatedDeduplicationWindowSeconds *float64 `json:"replicatedDeduplicationWindowSeconds,omitempty" tf:"replicated_deduplication_window_seconds,omitempty"`
+	// Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
+	ReplicatedDeduplicationWindowSeconds *float64 `json:"replicatedDeduplicationWindowSeconds,omitempty" tf:"replicated_deduplication_window_seconds,omitempty"`
-// Enables zero-copy replication when a replica is located on a remote filesystem.
-TTLOnlyDropParts *bool `json:"ttlOnlyDropParts,omitempty" tf:"ttl_only_drop_parts,omitempty"`
+	// Drops a data part entirely when all of its rows are expired by TTL, instead of cleaning up individual rows.
+	TTLOnlyDropParts *bool `json:"ttlOnlyDropParts,omitempty" tf:"ttl_only_drop_parts,omitempty"`
 }
-
 type MergeTreeObservation struct {
+	// When this setting has a value greater than zero only a single replica starts the merge immediately if merged part on shared storage and allow_remote_fs_zero_copy_replication is enabled.
+	AllowRemoteFsZeroCopyReplication *bool `json:"allowRemoteFsZeroCopyReplication,omitempty" tf:"allow_remote_fs_zero_copy_replication,omitempty"`
-// When this setting has a value greater than zero only a single replica starts the merge immediately if merged part on shared storage and allow_remote_fs_zero_copy_replication is enabled.
-AllowRemoteFsZeroCopyReplication *bool `json:"allowRemoteFsZeroCopyReplication,omitempty" tf:"allow_remote_fs_zero_copy_replication,omitempty"`
-
-// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.
-CheckSampleColumnIsCorrect *bool `json:"checkSampleColumnIsCorrect,omitempty" tf:"check_sample_column_is_correct,omitempty"`
+	// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.
+	CheckSampleColumnIsCorrect *bool `json:"checkSampleColumnIsCorrect,omitempty" tf:"check_sample_column_is_correct,omitempty"`
-// Minimum period to clean old queue logs, blocks hashes and parts.
-CleanupDelayPeriod *float64 `json:"cleanupDelayPeriod,omitempty" tf:"cleanup_delay_period,omitempty"`
+	// Minimum period to clean old queue logs, blocks hashes and parts.
+	CleanupDelayPeriod *float64 `json:"cleanupDelayPeriod,omitempty" tf:"cleanup_delay_period,omitempty"`
-// If the number of inactive parts in a single partition in the table at least that many the inactive_parts_to_delay_insert value, an INSERT artificially slows down. It is useful when a server fails to clean up parts quickly enough.
-InactivePartsToDelayInsert *float64 `json:"inactivePartsToDelayInsert,omitempty" tf:"inactive_parts_to_delay_insert,omitempty"`
+	// If the number of inactive parts in a single partition in the table is at least the inactive_parts_to_delay_insert value, an INSERT artificially slows down. It is useful when a server fails to clean up parts quickly enough.
+	InactivePartsToDelayInsert *float64 `json:"inactivePartsToDelayInsert,omitempty" tf:"inactive_parts_to_delay_insert,omitempty"`
-// If the number of inactive parts in a single partition more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
-InactivePartsToThrowInsert *float64 `json:"inactivePartsToThrowInsert,omitempty" tf:"inactive_parts_to_throw_insert,omitempty"`
+	// If the number of inactive parts in a single partition is more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
+	InactivePartsToThrowInsert *float64 `json:"inactivePartsToThrowInsert,omitempty" tf:"inactive_parts_to_throw_insert,omitempty"`
-// The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
-MaxAvgPartSizeForTooManyParts *float64 `json:"maxAvgPartSizeForTooManyParts,omitempty" tf:"max_avg_part_size_for_too_many_parts,omitempty"`
+	// The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed nor rejected. This makes it possible to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
+	MaxAvgPartSizeForTooManyParts *float64 `json:"maxAvgPartSizeForTooManyParts,omitempty" tf:"max_avg_part_size_for_too_many_parts,omitempty"`
-// The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool -- roughly corresponds to the maximum possible part size created by an automatic background merge.
-MaxBytesToMergeAtMaxSpaceInPool *float64 `json:"maxBytesToMergeAtMaxSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_max_space_in_pool,omitempty"`
+	// The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool -- roughly corresponds to the maximum possible part size created by an automatic background merge.
+	MaxBytesToMergeAtMaxSpaceInPool *float64 `json:"maxBytesToMergeAtMaxSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_max_space_in_pool,omitempty"`
-// Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
-MaxBytesToMergeAtMinSpaceInPool *float64 `json:"maxBytesToMergeAtMinSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_min_space_in_pool,omitempty"`
+	// Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
+	MaxBytesToMergeAtMinSpaceInPool *float64 `json:"maxBytesToMergeAtMinSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_min_space_in_pool,omitempty"`
-// Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.
-MaxCleanupDelayPeriod *float64 `json:"maxCleanupDelayPeriod,omitempty" tf:"max_cleanup_delay_period,omitempty"`
+	// Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.
+	MaxCleanupDelayPeriod *float64 `json:"maxCleanupDelayPeriod,omitempty" tf:"max_cleanup_delay_period,omitempty"`
-// Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).
-MaxMergeSelectingSleepMs *float64 `json:"maxMergeSelectingSleepMs,omitempty" tf:"max_merge_selecting_sleep_ms,omitempty"`
+	// Maximum sleep time for merge selecting; a lower setting will trigger selecting tasks in background_schedule_pool frequently, which results in a large amount of requests to ZooKeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).
+	MaxMergeSelectingSleepMs *float64 `json:"maxMergeSelectingSleepMs,omitempty" tf:"max_merge_selecting_sleep_ms,omitempty"`
-// When there is more than specified number of merges with TTL entries in pool, do not assign new merge with TTL.
-MaxNumberOfMergesWithTTLInPool *float64 `json:"maxNumberOfMergesWithTtlInPool,omitempty" tf:"max_number_of_merges_with_ttl_in_pool,omitempty"`
+	// When there are more than the specified number of merges with TTL entries in the pool, do not assign a new merge with TTL.
+	MaxNumberOfMergesWithTTLInPool *float64 `json:"maxNumberOfMergesWithTtlInPool,omitempty" tf:"max_number_of_merges_with_ttl_in_pool,omitempty"`
-// Maximum number of parts in all partitions.
-MaxPartsInTotal *float64 `json:"maxPartsInTotal,omitempty" tf:"max_parts_in_total,omitempty"`
+	// Maximum number of parts in all partitions.
+	MaxPartsInTotal *float64 `json:"maxPartsInTotal,omitempty" tf:"max_parts_in_total,omitempty"`
-// Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
-MaxReplicatedMergesInQueue *float64 `json:"maxReplicatedMergesInQueue,omitempty" tf:"max_replicated_merges_in_queue,omitempty"`
+	// Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
+	MaxReplicatedMergesInQueue *float64 `json:"maxReplicatedMergesInQueue,omitempty" tf:"max_replicated_merges_in_queue,omitempty"`
-// The number of rows that are read from the merged parts into memory. Default value: 8192.
-MergeMaxBlockSize *float64 `json:"mergeMaxBlockSize,omitempty" tf:"merge_max_block_size,omitempty"`
+	// The number of rows that are read from the merged parts into memory. Default value: 8192.
+	MergeMaxBlockSize *float64 `json:"mergeMaxBlockSize,omitempty" tf:"merge_max_block_size,omitempty"`
-// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
-MergeSelectingSleepMs *float64 `json:"mergeSelectingSleepMs,omitempty" tf:"merge_selecting_sleep_ms,omitempty"`
+	// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+	MergeSelectingSleepMs *float64 `json:"mergeSelectingSleepMs,omitempty" tf:"merge_selecting_sleep_ms,omitempty"`
-// Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).
-MergeWithRecompressionTTLTimeout *float64 `json:"mergeWithRecompressionTtlTimeout,omitempty" tf:"merge_with_recompression_ttl_timeout,omitempty"`
+	// Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).
+	MergeWithRecompressionTTLTimeout *float64 `json:"mergeWithRecompressionTtlTimeout,omitempty" tf:"merge_with_recompression_ttl_timeout,omitempty"`
-// Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).
-MergeWithTTLTimeout *float64 `json:"mergeWithTtlTimeout,omitempty" tf:"merge_with_ttl_timeout,omitempty"`
+	// Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).
+	MergeWithTTLTimeout *float64 `json:"mergeWithTtlTimeout,omitempty" tf:"merge_with_ttl_timeout,omitempty"`
-// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.
-MinAgeToForceMergeOnPartitionOnly *bool `json:"minAgeToForceMergeOnPartitionOnly,omitempty" tf:"min_age_to_force_merge_on_partition_only,omitempty"`
+	// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on a subset.
+	MinAgeToForceMergeOnPartitionOnly *bool `json:"minAgeToForceMergeOnPartitionOnly,omitempty" tf:"min_age_to_force_merge_on_partition_only,omitempty"`
-// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
-MinAgeToForceMergeSeconds *float64 `json:"minAgeToForceMergeSeconds,omitempty" tf:"min_age_to_force_merge_seconds,omitempty"`
+	// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
+	MinAgeToForceMergeSeconds *float64 `json:"minAgeToForceMergeSeconds,omitempty" tf:"min_age_to_force_merge_seconds,omitempty"`
-// Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
-MinBytesForWidePart *float64 `json:"minBytesForWidePart,omitempty" tf:"min_bytes_for_wide_part,omitempty"`
+	// Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+	MinBytesForWidePart *float64 `json:"minBytesForWidePart,omitempty" tf:"min_bytes_for_wide_part,omitempty"`
-// Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
-MinRowsForWidePart *float64 `json:"minRowsForWidePart,omitempty" tf:"min_rows_for_wide_part,omitempty"`
+	// Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+	MinRowsForWidePart *float64 `json:"minRowsForWidePart,omitempty" tf:"min_rows_for_wide_part,omitempty"`
-// When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.
-NumberOfFreeEntriesInPoolToExecuteMutation *float64 `json:"numberOfFreeEntriesInPoolToExecuteMutation,omitempty" tf:"number_of_free_entries_in_pool_to_execute_mutation,omitempty"`
+	// When there are fewer than the specified number of free entries in the pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.
+	NumberOfFreeEntriesInPoolToExecuteMutation *float64 `json:"numberOfFreeEntriesInPoolToExecuteMutation,omitempty" tf:"number_of_free_entries_in_pool_to_execute_mutation,omitempty"`
-// Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
-NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge *float64 `json:"numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge,omitempty" tf:"number_of_free_entries_in_pool_to_lower_max_size_of_merge,omitempty"`
+	// Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
+	NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge *float64 `json:"numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge,omitempty" tf:"number_of_free_entries_in_pool_to_lower_max_size_of_merge,omitempty"`
-// Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts artificially reduce the rate of inserting data into the table.
-PartsToDelayInsert *float64 `json:"partsToDelayInsert,omitempty" tf:"parts_to_delay_insert,omitempty"`
+	// Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
+	PartsToDelayInsert *float64 `json:"partsToDelayInsert,omitempty" tf:"parts_to_delay_insert,omitempty"`
-// Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
-PartsToThrowInsert *float64 `json:"partsToThrowInsert,omitempty" tf:"parts_to_throw_insert,omitempty"`
+	// Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
+	PartsToThrowInsert *float64 `json:"partsToThrowInsert,omitempty" tf:"parts_to_throw_insert,omitempty"`
-// Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
-ReplicatedDeduplicationWindow *float64 `json:"replicatedDeduplicationWindow,omitempty" tf:"replicated_deduplication_window,omitempty"`
+	// Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
+	ReplicatedDeduplicationWindow *float64 `json:"replicatedDeduplicationWindow,omitempty" tf:"replicated_deduplication_window,omitempty"`
-// Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones wil be deleted).
-ReplicatedDeduplicationWindowSeconds *float64 `json:"replicatedDeduplicationWindowSeconds,omitempty" tf:"replicated_deduplication_window_seconds,omitempty"`
+	// Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
+	ReplicatedDeduplicationWindowSeconds *float64 `json:"replicatedDeduplicationWindowSeconds,omitempty" tf:"replicated_deduplication_window_seconds,omitempty"`
-// Enables zero-copy replication when a replica is located on a remote filesystem.
-TTLOnlyDropParts *bool `json:"ttlOnlyDropParts,omitempty" tf:"ttl_only_drop_parts,omitempty"`
+	// Drops a data part entirely when all of its rows are expired by TTL, instead of cleaning up individual rows.
+	TTLOnlyDropParts *bool `json:"ttlOnlyDropParts,omitempty" tf:"ttl_only_drop_parts,omitempty"`
 }
-
 type MergeTreeParameters struct {
+	// When this setting has a value greater than zero only a single replica starts the merge immediately if merged part on shared storage and allow_remote_fs_zero_copy_replication is enabled.
+	// +kubebuilder:validation:Optional
+	AllowRemoteFsZeroCopyReplication *bool `json:"allowRemoteFsZeroCopyReplication,omitempty" tf:"allow_remote_fs_zero_copy_replication,omitempty"`
-// When this setting has a value greater than zero only a single replica starts the merge immediately if merged part on shared storage and allow_remote_fs_zero_copy_replication is enabled.
-// +kubebuilder:validation:Optional
-AllowRemoteFsZeroCopyReplication *bool `json:"allowRemoteFsZeroCopyReplication,omitempty" tf:"allow_remote_fs_zero_copy_replication,omitempty"`
-
-// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.
-// +kubebuilder:validation:Optional
-CheckSampleColumnIsCorrect *bool `json:"checkSampleColumnIsCorrect,omitempty" tf:"check_sample_column_is_correct,omitempty"`
+	// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.
+	// +kubebuilder:validation:Optional
+	CheckSampleColumnIsCorrect *bool `json:"checkSampleColumnIsCorrect,omitempty" tf:"check_sample_column_is_correct,omitempty"`
-// Minimum period to clean old queue logs, blocks hashes and parts.
-// +kubebuilder:validation:Optional
-CleanupDelayPeriod *float64 `json:"cleanupDelayPeriod,omitempty" tf:"cleanup_delay_period,omitempty"`
+	// Minimum period to clean old queue logs, blocks hashes and parts.
+	// +kubebuilder:validation:Optional
+	CleanupDelayPeriod *float64 `json:"cleanupDelayPeriod,omitempty" tf:"cleanup_delay_period,omitempty"`
-// If the number of inactive parts in a single partition in the table at least that many the inactive_parts_to_delay_insert value, an INSERT artificially slows down. It is useful when a server fails to clean up parts quickly enough.
-// +kubebuilder:validation:Optional
-InactivePartsToDelayInsert *float64 `json:"inactivePartsToDelayInsert,omitempty" tf:"inactive_parts_to_delay_insert,omitempty"`
+	// If the number of inactive parts in a single partition in the table is at least the inactive_parts_to_delay_insert value, an INSERT artificially slows down. It is useful when a server fails to clean up parts quickly enough.
+	// +kubebuilder:validation:Optional
+	InactivePartsToDelayInsert *float64 `json:"inactivePartsToDelayInsert,omitempty" tf:"inactive_parts_to_delay_insert,omitempty"`
-// If the number of inactive parts in a single partition more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
-// +kubebuilder:validation:Optional
-InactivePartsToThrowInsert *float64 `json:"inactivePartsToThrowInsert,omitempty" tf:"inactive_parts_to_throw_insert,omitempty"`
+	// If the number of inactive parts in a single partition is more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
+	// +kubebuilder:validation:Optional
+	InactivePartsToThrowInsert *float64 `json:"inactivePartsToThrowInsert,omitempty" tf:"inactive_parts_to_throw_insert,omitempty"`
-// The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
-// +kubebuilder:validation:Optional
-MaxAvgPartSizeForTooManyParts *float64 `json:"maxAvgPartSizeForTooManyParts,omitempty" tf:"max_avg_part_size_for_too_many_parts,omitempty"`
+	// The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed nor rejected. This makes it possible to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
+	// +kubebuilder:validation:Optional
+	MaxAvgPartSizeForTooManyParts *float64 `json:"maxAvgPartSizeForTooManyParts,omitempty" tf:"max_avg_part_size_for_too_many_parts,omitempty"`
-// The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool -- roughly corresponds to the maximum possible part size created by an automatic background merge.
-// +kubebuilder:validation:Optional
-MaxBytesToMergeAtMaxSpaceInPool *float64 `json:"maxBytesToMergeAtMaxSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_max_space_in_pool,omitempty"`
+	// The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool -- roughly corresponds to the maximum possible part size created by an automatic background merge.
+	// +kubebuilder:validation:Optional
+	MaxBytesToMergeAtMaxSpaceInPool *float64 `json:"maxBytesToMergeAtMaxSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_max_space_in_pool,omitempty"`
-// Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
-// +kubebuilder:validation:Optional
-MaxBytesToMergeAtMinSpaceInPool *float64 `json:"maxBytesToMergeAtMinSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_min_space_in_pool,omitempty"`
+	// Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
+	// +kubebuilder:validation:Optional
+	MaxBytesToMergeAtMinSpaceInPool *float64 `json:"maxBytesToMergeAtMinSpaceInPool,omitempty" tf:"max_bytes_to_merge_at_min_space_in_pool,omitempty"`
-// Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.
-// +kubebuilder:validation:Optional
-MaxCleanupDelayPeriod *float64 `json:"maxCleanupDelayPeriod,omitempty" tf:"max_cleanup_delay_period,omitempty"`
+	// Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.
+	// +kubebuilder:validation:Optional
+	MaxCleanupDelayPeriod *float64 `json:"maxCleanupDelayPeriod,omitempty" tf:"max_cleanup_delay_period,omitempty"`
-// Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).
-// +kubebuilder:validation:Optional
-MaxMergeSelectingSleepMs *float64 `json:"maxMergeSelectingSleepMs,omitempty" tf:"max_merge_selecting_sleep_ms,omitempty"`
+	// Maximum sleep time for merge selecting; a lower setting will trigger selecting tasks in background_schedule_pool frequently, which results in a large amount of requests to ZooKeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).
+	// +kubebuilder:validation:Optional
+	MaxMergeSelectingSleepMs *float64 `json:"maxMergeSelectingSleepMs,omitempty" tf:"max_merge_selecting_sleep_ms,omitempty"`
-// When there is more than specified number of merges with TTL entries in pool, do not assign new merge with TTL.
-// +kubebuilder:validation:Optional
-MaxNumberOfMergesWithTTLInPool *float64 `json:"maxNumberOfMergesWithTtlInPool,omitempty" tf:"max_number_of_merges_with_ttl_in_pool,omitempty"`
+	// When there are more than the specified number of merges with TTL entries in the pool, do not assign a new merge with TTL.
+	// +kubebuilder:validation:Optional
+	MaxNumberOfMergesWithTTLInPool *float64 `json:"maxNumberOfMergesWithTtlInPool,omitempty" tf:"max_number_of_merges_with_ttl_in_pool,omitempty"`
-// Maximum number of parts in all partitions.
-// +kubebuilder:validation:Optional
-MaxPartsInTotal *float64 `json:"maxPartsInTotal,omitempty" tf:"max_parts_in_total,omitempty"`
+	// Maximum number of parts in all partitions.
+	// +kubebuilder:validation:Optional
+	MaxPartsInTotal *float64 `json:"maxPartsInTotal,omitempty" tf:"max_parts_in_total,omitempty"`
-// Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
-// +kubebuilder:validation:Optional
-MaxReplicatedMergesInQueue *float64 `json:"maxReplicatedMergesInQueue,omitempty" tf:"max_replicated_merges_in_queue,omitempty"`
+	// Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
+	// +kubebuilder:validation:Optional
+	MaxReplicatedMergesInQueue *float64 `json:"maxReplicatedMergesInQueue,omitempty" tf:"max_replicated_merges_in_queue,omitempty"`
-// The number of rows that are read from the merged parts into memory. Default value: 8192.
-// +kubebuilder:validation:Optional
-MergeMaxBlockSize *float64 `json:"mergeMaxBlockSize,omitempty" tf:"merge_max_block_size,omitempty"`
+	// The number of rows that are read from the merged parts into memory. Default value: 8192.
+	// +kubebuilder:validation:Optional
+	MergeMaxBlockSize *float64 `json:"mergeMaxBlockSize,omitempty" tf:"merge_max_block_size,omitempty"`
-// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
-// +kubebuilder:validation:Optional
-MergeSelectingSleepMs *float64 `json:"mergeSelectingSleepMs,omitempty" tf:"merge_selecting_sleep_ms,omitempty"`
+	// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+	// +kubebuilder:validation:Optional
+	MergeSelectingSleepMs *float64 `json:"mergeSelectingSleepMs,omitempty" tf:"merge_selecting_sleep_ms,omitempty"`
-// Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).
-// +kubebuilder:validation:Optional
-MergeWithRecompressionTTLTimeout *float64 `json:"mergeWithRecompressionTtlTimeout,omitempty" tf:"merge_with_recompression_ttl_timeout,omitempty"`
+	// Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).
+	// +kubebuilder:validation:Optional
+	MergeWithRecompressionTTLTimeout *float64 `json:"mergeWithRecompressionTtlTimeout,omitempty" tf:"merge_with_recompression_ttl_timeout,omitempty"`
-// Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).
-// +kubebuilder:validation:Optional
-MergeWithTTLTimeout *float64 `json:"mergeWithTtlTimeout,omitempty" tf:"merge_with_ttl_timeout,omitempty"`
+	// Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).
+	// +kubebuilder:validation:Optional
+	MergeWithTTLTimeout *float64 `json:"mergeWithTtlTimeout,omitempty" tf:"merge_with_ttl_timeout,omitempty"`
-// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.
-// +kubebuilder:validation:Optional
-MinAgeToForceMergeOnPartitionOnly *bool `json:"minAgeToForceMergeOnPartitionOnly,omitempty" tf:"min_age_to_force_merge_on_partition_only,omitempty"`
+	// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on a subset.
+	// +kubebuilder:validation:Optional
+	MinAgeToForceMergeOnPartitionOnly *bool `json:"minAgeToForceMergeOnPartitionOnly,omitempty" tf:"min_age_to_force_merge_on_partition_only,omitempty"`
-// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
-// +kubebuilder:validation:Optional
-MinAgeToForceMergeSeconds *float64 `json:"minAgeToForceMergeSeconds,omitempty" tf:"min_age_to_force_merge_seconds,omitempty"`
+	// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
+	// +kubebuilder:validation:Optional
+	MinAgeToForceMergeSeconds *float64 `json:"minAgeToForceMergeSeconds,omitempty" tf:"min_age_to_force_merge_seconds,omitempty"`
-// Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
-// +kubebuilder:validation:Optional
-MinBytesForWidePart *float64 `json:"minBytesForWidePart,omitempty" tf:"min_bytes_for_wide_part,omitempty"`
+	// Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+	// +kubebuilder:validation:Optional
+	MinBytesForWidePart *float64 `json:"minBytesForWidePart,omitempty" tf:"min_bytes_for_wide_part,omitempty"`
-// Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
-// +kubebuilder:validation:Optional
-MinRowsForWidePart *float64 `json:"minRowsForWidePart,omitempty" tf:"min_rows_for_wide_part,omitempty"`
+	// Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+	// +kubebuilder:validation:Optional
+	MinRowsForWidePart *float64 `json:"minRowsForWidePart,omitempty" tf:"min_rows_for_wide_part,omitempty"`
-// When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.
-// +kubebuilder:validation:Optional
-NumberOfFreeEntriesInPoolToExecuteMutation *float64 `json:"numberOfFreeEntriesInPoolToExecuteMutation,omitempty" tf:"number_of_free_entries_in_pool_to_execute_mutation,omitempty"`
+	// When there are fewer than the specified number of free entries in the pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.
+	// +kubebuilder:validation:Optional
+	NumberOfFreeEntriesInPoolToExecuteMutation *float64 `json:"numberOfFreeEntriesInPoolToExecuteMutation,omitempty" tf:"number_of_free_entries_in_pool_to_execute_mutation,omitempty"`
-// Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
-// +kubebuilder:validation:Optional
-NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge *float64 `json:"numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge,omitempty" tf:"number_of_free_entries_in_pool_to_lower_max_size_of_merge,omitempty"`
+	// Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
+	// +kubebuilder:validation:Optional
+	NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge *float64 `json:"numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge,omitempty" tf:"number_of_free_entries_in_pool_to_lower_max_size_of_merge,omitempty"`
-// Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts artificially reduce the rate of inserting data into the table.
-// +kubebuilder:validation:Optional
-PartsToDelayInsert *float64 `json:"partsToDelayInsert,omitempty" tf:"parts_to_delay_insert,omitempty"`
+	// Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
+	// +kubebuilder:validation:Optional
+	PartsToDelayInsert *float64 `json:"partsToDelayInsert,omitempty" tf:"parts_to_delay_insert,omitempty"`
-// Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
-// +kubebuilder:validation:Optional
-PartsToThrowInsert *float64 `json:"partsToThrowInsert,omitempty" tf:"parts_to_throw_insert,omitempty"`
+	// Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
+	// +kubebuilder:validation:Optional
+	PartsToThrowInsert *float64 `json:"partsToThrowInsert,omitempty" tf:"parts_to_throw_insert,omitempty"`
-// Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
-// +kubebuilder:validation:Optional
-ReplicatedDeduplicationWindow *float64 `json:"replicatedDeduplicationWindow,omitempty" tf:"replicated_deduplication_window,omitempty"`
+	// Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
+	// +kubebuilder:validation:Optional
+	ReplicatedDeduplicationWindow *float64 `json:"replicatedDeduplicationWindow,omitempty" tf:"replicated_deduplication_window,omitempty"`
-// Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones wil be deleted).
-// +kubebuilder:validation:Optional
-ReplicatedDeduplicationWindowSeconds *float64 `json:"replicatedDeduplicationWindowSeconds,omitempty" tf:"replicated_deduplication_window_seconds,omitempty"`
+	// Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
+	// +kubebuilder:validation:Optional
+	ReplicatedDeduplicationWindowSeconds *float64 `json:"replicatedDeduplicationWindowSeconds,omitempty" tf:"replicated_deduplication_window_seconds,omitempty"`
-// Enables zero-copy replication when a replica is located on a remote filesystem.
-// +kubebuilder:validation:Optional
-TTLOnlyDropParts *bool `json:"ttlOnlyDropParts,omitempty" tf:"ttl_only_drop_parts,omitempty"`
+	// Drops a data part entirely when all of its rows are expired by TTL, instead of cleaning up individual rows.
+	// +kubebuilder:validation:Optional
+	TTLOnlyDropParts *bool `json:"ttlOnlyDropParts,omitempty" tf:"ttl_only_drop_parts,omitempty"`
 }
-
 type PatternInitParameters struct {
+	// Aggregation function name.
+	Function *string `json:"function,omitempty" tf:"function,omitempty"`
-// Aggregation function name.
-Function *string `json:"function,omitempty" tf:"function,omitempty"`
+	// Regular expression that the metric name must match.
+	Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
-// Regular expression that the metric name must match.
-Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
-
-// Retain parameters.
-Retention []RetentionInitParameters `json:"retention,omitempty" tf:"retention,omitempty"`
+	// Retain parameters.
+	Retention []RetentionInitParameters `json:"retention,omitempty" tf:"retention,omitempty"`
 }
-
 type PatternObservation struct {
+	// Aggregation function name.
+	Function *string `json:"function,omitempty" tf:"function,omitempty"`
-// Aggregation function name.
-Function *string `json:"function,omitempty" tf:"function,omitempty"`
-
-// Regular expression that the metric name must match.
-Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
+	// Regular expression that the metric name must match.
+	Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
-// Retain parameters.
-Retention []RetentionObservation `json:"retention,omitempty" tf:"retention,omitempty"`
+	// Retain parameters.
+	Retention []RetentionObservation `json:"retention,omitempty" tf:"retention,omitempty"`
 }
-
 type PatternParameters struct {
+	// Aggregation function name.
+	// +kubebuilder:validation:Optional
+	Function *string `json:"function" tf:"function,omitempty"`
-// Aggregation function name.
-// +kubebuilder:validation:Optional
-Function *string `json:"function" tf:"function,omitempty"`
-
-// Regular expression that the metric name must match.
-// +kubebuilder:validation:Optional
-Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
+	// Regular expression that the metric name must match.
+	// +kubebuilder:validation:Optional
+	Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
-// Retain parameters.
-// +kubebuilder:validation:Optional
-Retention []RetentionParameters `json:"retention,omitempty" tf:"retention,omitempty"`
+	// Retain parameters.
+	// +kubebuilder:validation:Optional
+	Retention []RetentionParameters `json:"retention,omitempty" tf:"retention,omitempty"`
 }
-
 type PermissionInitParameters struct {
-
-// The name of the database that the permission grants access to.
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// The name of the database that the permission grants access to.
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }
-
 type PermissionObservation struct {
-
-// The name of the database that the permission grants access to.
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+	// The name of the database that the permission grants access to.
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
 }
-
 type PermissionParameters struct {
-
-// The name of the database that the permission grants access to.
-// +kubebuilder:validation:Optional
-DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"`
+	// The name of the database that the permission grants access to.
+	// +kubebuilder:validation:Optional
+	DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"`
 }
-
 type QueryCacheInitParameters struct {
+	// The maximum number of SELECT query results stored in the cache. Default value: 1024.
+	MaxEntries *float64 `json:"maxEntries,omitempty" tf:"max_entries,omitempty"`
-// The maximum number of SELECT query results stored in the cache. Default value: 1024.
-MaxEntries *float64 `json:"maxEntries,omitempty" tf:"max_entries,omitempty"`
-
-// The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).
-MaxEntrySizeInBytes *float64 `json:"maxEntrySizeInBytes,omitempty" tf:"max_entry_size_in_bytes,omitempty"`
+	// The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).
+	MaxEntrySizeInBytes *float64 `json:"maxEntrySizeInBytes,omitempty" tf:"max_entry_size_in_bytes,omitempty"`
-// The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 mil).
-MaxEntrySizeInRows *float64 `json:"maxEntrySizeInRows,omitempty" tf:"max_entry_size_in_rows,omitempty"`
+	// The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 mil).
+	MaxEntrySizeInRows *float64 `json:"maxEntrySizeInRows,omitempty" tf:"max_entry_size_in_rows,omitempty"`
-// The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).
-MaxSizeInBytes *float64 `json:"maxSizeInBytes,omitempty" tf:"max_size_in_bytes,omitempty"`
+	// The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).
+	MaxSizeInBytes *float64 `json:"maxSizeInBytes,omitempty" tf:"max_size_in_bytes,omitempty"`
 }
-
 type QueryCacheObservation struct {
+	// The maximum number of SELECT query results stored in the cache. Default value: 1024.
+	MaxEntries *float64 `json:"maxEntries,omitempty" tf:"max_entries,omitempty"`
-// The maximum number of SELECT query results stored in the cache. Default value: 1024.
-MaxEntries *float64 `json:"maxEntries,omitempty" tf:"max_entries,omitempty"`
-
-// The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).
-MaxEntrySizeInBytes *float64 `json:"maxEntrySizeInBytes,omitempty" tf:"max_entry_size_in_bytes,omitempty"`
+	// The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).
+ MaxEntrySizeInBytes *float64 `json:"maxEntrySizeInBytes,omitempty" tf:"max_entry_size_in_bytes,omitempty"`
-// The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 mil).
-MaxEntrySizeInRows *float64 `json:"maxEntrySizeInRows,omitempty" tf:"max_entry_size_in_rows,omitempty"`
+ // The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 mil).
+ MaxEntrySizeInRows *float64 `json:"maxEntrySizeInRows,omitempty" tf:"max_entry_size_in_rows,omitempty"`
-// The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).
-MaxSizeInBytes *float64 `json:"maxSizeInBytes,omitempty" tf:"max_size_in_bytes,omitempty"`
+ // The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).
+ MaxSizeInBytes *float64 `json:"maxSizeInBytes,omitempty" tf:"max_size_in_bytes,omitempty"`
}
-
type QueryCacheParameters struct {
+ // The maximum number of SELECT query results stored in the cache. Default value: 1024.
+ // +kubebuilder:validation:Optional
+ MaxEntries *float64 `json:"maxEntries,omitempty" tf:"max_entries,omitempty"`
-// The maximum number of SELECT query results stored in the cache. Default value: 1024.
-// +kubebuilder:validation:Optional
-MaxEntries *float64 `json:"maxEntries,omitempty" tf:"max_entries,omitempty"`
+ // The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).
+ // +kubebuilder:validation:Optional
+ MaxEntrySizeInBytes *float64 `json:"maxEntrySizeInBytes,omitempty" tf:"max_entry_size_in_bytes,omitempty"`
-// The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).
-// +kubebuilder:validation:Optional
-MaxEntrySizeInBytes *float64 `json:"maxEntrySizeInBytes,omitempty" tf:"max_entry_size_in_bytes,omitempty"`
+ // The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 mil).
+ // +kubebuilder:validation:Optional
+ MaxEntrySizeInRows *float64 `json:"maxEntrySizeInRows,omitempty" tf:"max_entry_size_in_rows,omitempty"`
-// The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 mil).
-// +kubebuilder:validation:Optional
-MaxEntrySizeInRows *float64 `json:"maxEntrySizeInRows,omitempty" tf:"max_entry_size_in_rows,omitempty"`
-
-// The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).
-// +kubebuilder:validation:Optional
-MaxSizeInBytes *float64 `json:"maxSizeInBytes,omitempty" tf:"max_size_in_bytes,omitempty"`
+ // The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).
+ // +kubebuilder:validation:Optional
+ MaxSizeInBytes *float64 `json:"maxSizeInBytes,omitempty" tf:"max_size_in_bytes,omitempty"`
}
-
type QueryMaskingRulesInitParameters struct {
+ // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Regular expression that the metric name must match.
-Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
+ // Regular expression that the metric name must match.
+ Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
-// Substitution string for sensitive data. Default value: six asterisks.
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+ // Substitution string for sensitive data. Default value: six asterisks.
+ Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
}
-
type QueryMaskingRulesObservation struct {
+ // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Regular expression that the metric name must match.
-Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
+ // Regular expression that the metric name must match.
+ Regexp *string `json:"regexp,omitempty" tf:"regexp,omitempty"`
-// Substitution string for sensitive data. Default value: six asterisks.
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+ // Substitution string for sensitive data. Default value: six asterisks.
+ Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
}
-
type QueryMaskingRulesParameters struct {
+ // The name of the user.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Regular expression that the metric name must match.
-// +kubebuilder:validation:Optional
-Regexp *string `json:"regexp" tf:"regexp,omitempty"`
+ // Regular expression that the metric name must match.
+ // +kubebuilder:validation:Optional
+ Regexp *string `json:"regexp" tf:"regexp,omitempty"`
-// Substitution string for sensitive data. Default value: six asterisks.
-// +kubebuilder:validation:Optional
-Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
+ // Substitution string for sensitive data. Default value: six asterisks.
+ // +kubebuilder:validation:Optional
+ Replace *string `json:"replace,omitempty" tf:"replace,omitempty"`
}
-
type QuotaInitParameters struct {
+ // The number of queries that threw an exception.
+ Errors *float64 `json:"errors,omitempty" tf:"errors,omitempty"`
-// The number of queries that threw an exception.
-Errors *float64 `json:"errors,omitempty" tf:"errors,omitempty"`
-
-// The total query execution time, in milliseconds (wall time).
-ExecutionTime *float64 `json:"executionTime,omitempty" tf:"execution_time,omitempty"`
+ // The total query execution time, in milliseconds (wall time).
+ ExecutionTime *float64 `json:"executionTime,omitempty" tf:"execution_time,omitempty"`
-// Duration of interval for quota in milliseconds.
-IntervalDuration *float64 `json:"intervalDuration,omitempty" tf:"interval_duration,omitempty"`
+ // Duration of interval for quota in milliseconds.
+ IntervalDuration *float64 `json:"intervalDuration,omitempty" tf:"interval_duration,omitempty"`
-// The total number of queries.
-Queries *float64 `json:"queries,omitempty" tf:"queries,omitempty"`
+ // The total number of queries.
+ Queries *float64 `json:"queries,omitempty" tf:"queries,omitempty"`
-// The total number of source rows read from tables for running the query, on all remote servers.
-ReadRows *float64 `json:"readRows,omitempty" tf:"read_rows,omitempty"`
+ // The total number of source rows read from tables for running the query, on all remote servers.
+ ReadRows *float64 `json:"readRows,omitempty" tf:"read_rows,omitempty"`
-// The total number of rows given as the result.
-ResultRows *float64 `json:"resultRows,omitempty" tf:"result_rows,omitempty"`
+ // The total number of rows given as the result.
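A hypothetical masking rule, sketched against QueryMaskingRulesParameters (the rule name and regexp are invented; Regexp is the only required field, and Replace falls back to six asterisks when unset):

maskCards := QueryMaskingRulesParameters{
	Name:    ptr.To("mask-card-numbers"),
	Regexp:  ptr.To(`[0-9]{16}`),
	Replace: ptr.To("******"),
}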
+ ResultRows *float64 `json:"resultRows,omitempty" tf:"result_rows,omitempty"`
}
-
type QuotaObservation struct {
+ // The number of queries that threw an exception.
+ Errors *float64 `json:"errors,omitempty" tf:"errors,omitempty"`
-// The number of queries that threw an exception.
-Errors *float64 `json:"errors,omitempty" tf:"errors,omitempty"`
+ // The total query execution time, in milliseconds (wall time).
+ ExecutionTime *float64 `json:"executionTime,omitempty" tf:"execution_time,omitempty"`
-// The total query execution time, in milliseconds (wall time).
-ExecutionTime *float64 `json:"executionTime,omitempty" tf:"execution_time,omitempty"`
+ // Duration of interval for quota in milliseconds.
+ IntervalDuration *float64 `json:"intervalDuration,omitempty" tf:"interval_duration,omitempty"`
-// Duration of interval for quota in milliseconds.
-IntervalDuration *float64 `json:"intervalDuration,omitempty" tf:"interval_duration,omitempty"`
+ // The total number of queries.
+ Queries *float64 `json:"queries,omitempty" tf:"queries,omitempty"`
-// The total number of queries.
-Queries *float64 `json:"queries,omitempty" tf:"queries,omitempty"`
+ // The total number of source rows read from tables for running the query, on all remote servers.
+ ReadRows *float64 `json:"readRows,omitempty" tf:"read_rows,omitempty"`
-// The total number of source rows read from tables for running the query, on all remote servers.
-ReadRows *float64 `json:"readRows,omitempty" tf:"read_rows,omitempty"`
-
-// The total number of rows given as the result.
-ResultRows *float64 `json:"resultRows,omitempty" tf:"result_rows,omitempty"`
+ // The total number of rows given as the result.
+ ResultRows *float64 `json:"resultRows,omitempty" tf:"result_rows,omitempty"`
}
-
type QuotaParameters struct {
+ // The number of queries that threw an exception.
+ // +kubebuilder:validation:Optional
+ Errors *float64 `json:"errors,omitempty" tf:"errors,omitempty"`
-// The number of queries that threw an exception.
-// +kubebuilder:validation:Optional
-Errors *float64 `json:"errors,omitempty" tf:"errors,omitempty"`
-
-// The total query execution time, in milliseconds (wall time).
-// +kubebuilder:validation:Optional
-ExecutionTime *float64 `json:"executionTime,omitempty" tf:"execution_time,omitempty"`
+ // The total query execution time, in milliseconds (wall time).
+ // +kubebuilder:validation:Optional
+ ExecutionTime *float64 `json:"executionTime,omitempty" tf:"execution_time,omitempty"`
-// Duration of interval for quota in milliseconds.
-// +kubebuilder:validation:Optional
-IntervalDuration *float64 `json:"intervalDuration" tf:"interval_duration,omitempty"`
+ // Duration of interval for quota in milliseconds.
+ // +kubebuilder:validation:Optional
+ IntervalDuration *float64 `json:"intervalDuration" tf:"interval_duration,omitempty"`
-// The total number of queries.
-// +kubebuilder:validation:Optional
-Queries *float64 `json:"queries,omitempty" tf:"queries,omitempty"`
+ // The total number of queries.
+ // +kubebuilder:validation:Optional
+ Queries *float64 `json:"queries,omitempty" tf:"queries,omitempty"`
-// The total number of source rows read from tables for running the query, on all remote servers.
-// +kubebuilder:validation:Optional
-ReadRows *float64 `json:"readRows,omitempty" tf:"read_rows,omitempty"`
+ // The total number of source rows read from tables for running the query, on all remote servers.
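As an invented example of the quota shape above (all numbers illustrative): a one-hour window capping total queries and errored queries:

hourlyQuota := QuotaInitParameters{
	IntervalDuration: ptr.To(float64(3600000)), // window length in milliseconds
	Queries:          ptr.To(float64(5000)),
	Errors:           ptr.To(float64(100)),
}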
+ // +kubebuilder:validation:Optional
+ ReadRows *float64 `json:"readRows,omitempty" tf:"read_rows,omitempty"`
-// The total number of rows given as the result.
-// +kubebuilder:validation:Optional
-ResultRows *float64 `json:"resultRows,omitempty" tf:"result_rows,omitempty"`
+ // The total number of rows given as the result.
+ // +kubebuilder:validation:Optional
+ ResultRows *float64 `json:"resultRows,omitempty" tf:"result_rows,omitempty"`
}
-
type RabbitmqInitParameters struct {
+ // The password of the user.
+ PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"`
-// The password of the user.
-PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"`
-
-// RabbitMQ username.
-Username *string `json:"username,omitempty" tf:"username,omitempty"`
+ // RabbitMQ username.
+ Username *string `json:"username,omitempty" tf:"username,omitempty"`
-// RabbitMQ vhost. Default: ''.
-Vhost *string `json:"vhost,omitempty" tf:"vhost,omitempty"`
+ // RabbitMQ vhost. Default: ''.
+ Vhost *string `json:"vhost,omitempty" tf:"vhost,omitempty"`
}
-
type RabbitmqObservation struct {
+ // RabbitMQ username.
+ Username *string `json:"username,omitempty" tf:"username,omitempty"`
-// RabbitMQ username.
-Username *string `json:"username,omitempty" tf:"username,omitempty"`
-
-// RabbitMQ vhost. Default: ''.
-Vhost *string `json:"vhost,omitempty" tf:"vhost,omitempty"`
+ // RabbitMQ vhost. Default: ''.
+ Vhost *string `json:"vhost,omitempty" tf:"vhost,omitempty"`
}
-
type RabbitmqParameters struct {
+ // The password of the user.
+ // +kubebuilder:validation:Optional
+ PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"`
-// The password of the user.
-// +kubebuilder:validation:Optional
-PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"`
+ // RabbitMQ username.
+ // +kubebuilder:validation:Optional
+ Username *string `json:"username,omitempty" tf:"username,omitempty"`
-// RabbitMQ username.
-// +kubebuilder:validation:Optional
-Username *string `json:"username,omitempty" tf:"username,omitempty"`
-
-// RabbitMQ vhost. Default: ''.
-// +kubebuilder:validation:Optional
-Vhost *string `json:"vhost,omitempty" tf:"vhost,omitempty"`
+ // RabbitMQ vhost. Default: ''.
+ // +kubebuilder:validation:Optional
+ Vhost *string `json:"vhost,omitempty" tf:"vhost,omitempty"`
}
-
type ResourcesInitParameters struct {
+ // Volume of the storage available to a ZooKeeper host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesObservation struct {
+ // Volume of the storage available to a ZooKeeper host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
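An illustrative resources block; the disk type and preset IDs below are placeholders, not provider defaults:

zkResources := ResourcesInitParameters{
	DiskSize:         ptr.To(float64(32)),   // gigabytes
	DiskTypeID:       ptr.To("network-ssd"), // hypothetical disk type ID
	ResourcePresetID: ptr.To("s2.micro"),    // hypothetical preset ID
}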
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesParameters struct {
+ // Volume of the storage available to a ZooKeeper host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type RetentionInitParameters struct {
+ // Minimum data age in seconds.
+ Age *float64 `json:"age,omitempty" tf:"age,omitempty"`
-// Minimum data age in seconds.
-Age *float64 `json:"age,omitempty" tf:"age,omitempty"`
-
-// Accuracy of determining the age of the data in seconds.
-Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"`
+ // Accuracy of determining the age of the data in seconds.
+ Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"`
}
-
type RetentionObservation struct {
+ // Minimum data age in seconds.
+ Age *float64 `json:"age,omitempty" tf:"age,omitempty"`
-// Minimum data age in seconds.
-Age *float64 `json:"age,omitempty" tf:"age,omitempty"`
-
-// Accuracy of determining the age of the data in seconds.
-Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"`
+ // Accuracy of determining the age of the data in seconds.
+ Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"`
}
-
type RetentionParameters struct {
+ // Minimum data age in seconds.
+ // +kubebuilder:validation:Optional
+ Age *float64 `json:"age" tf:"age,omitempty"`
-// Minimum data age in seconds.
-// +kubebuilder:validation:Optional
-Age *float64 `json:"age" tf:"age,omitempty"`
-
-// Accuracy of determining the age of the data in seconds.
-// +kubebuilder:validation:Optional
-Precision *float64 `json:"precision" tf:"precision,omitempty"`
+ // Accuracy of determining the age of the data in seconds.
+ // +kubebuilder:validation:Optional
+ Precision *float64 `json:"precision" tf:"precision,omitempty"`
}
-
type SettingsInitParameters struct {
+ // Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
+ AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
-AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-
-// A comma-separated list of debug contexts to enable.
-Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
+ // A comma-separated list of debug contexts to enable.
+ Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
-// Enables verification of SSL certificates.
-EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
+ // Enables verification of SSL certificates.
+ EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
-// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
-MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
+ // Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+ MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
-// SASL mechanism used in kafka authentication.
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+ // SASL mechanism used in kafka authentication.
+ SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// User password on kafka server.
-SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
+ // User password on kafka server.
+ SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
-// Username on kafka server.
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
+ // Username on kafka server.
+ SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Security protocol used to connect to kafka server.
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
+ // Security protocol used to connect to kafka server.
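A sketch of the Kafka-related settings above with invented values (the SASL mechanism and protocol strings are common Kafka choices, not defaults of this provider):

kafkaSettings := SettingsInitParameters{
	AutoOffsetReset:   ptr.To("earliest"),
	SecurityProtocol:  ptr.To("SASL_SSL"),
	SaslMechanism:     ptr.To("SCRAM-SHA-512"),
	SaslUsername:      ptr.To("consumer"),      // hypothetical account
	MaxPollIntervalMs: ptr.To(float64(300000)), // five minutes
}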
+ SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
-SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
+ // Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+ SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
}
-
type SettingsObservation struct {
+ // Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
+ AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
-AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-
-// A comma-separated list of debug contexts to enable.
-Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
+ // A comma-separated list of debug contexts to enable.
+ Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
-// Enables verification of SSL certificates.
-EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
+ // Enables verification of SSL certificates.
+ EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
-// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
-MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
+ // Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+ MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
-// SASL mechanism used in kafka authentication.
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+ // SASL mechanism used in kafka authentication.
+ SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// Username on kafka server.
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
+ // Username on kafka server.
+ SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Security protocol used to connect to kafka server.
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
+ // Security protocol used to connect to kafka server.
+ SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
-SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
+ // Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+ SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
}
-
type SettingsParameters struct {
+ // Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
+ // +kubebuilder:validation:Optional
+ AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
-// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
-// +kubebuilder:validation:Optional
-AutoOffsetReset *string `json:"autoOffsetReset,omitempty" tf:"auto_offset_reset,omitempty"`
+ // A comma-separated list of debug contexts to enable.
+ // +kubebuilder:validation:Optional
+ Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
-// A comma-separated list of debug contexts to enable.
-// +kubebuilder:validation:Optional
-Debug *string `json:"debug,omitempty" tf:"debug,omitempty"`
+ // Enables verification of SSL certificates.
+ // +kubebuilder:validation:Optional
+ EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
-// Enables verification of SSL certificates.
-// +kubebuilder:validation:Optional
-EnableSSLCertificateVerification *bool `json:"enableSslCertificateVerification,omitempty" tf:"enable_ssl_certificate_verification,omitempty"`
+ // Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+ // +kubebuilder:validation:Optional
+ MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
-// Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
-// +kubebuilder:validation:Optional
-MaxPollIntervalMs *float64 `json:"maxPollIntervalMs,omitempty" tf:"max_poll_interval_ms,omitempty"`
+ // SASL mechanism used in kafka authentication.
+ // +kubebuilder:validation:Optional
+ SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// SASL mechanism used in kafka authentication.
-// +kubebuilder:validation:Optional
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+ // User password on kafka server.
+ // +kubebuilder:validation:Optional
+ SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
-// User password on kafka server.
-// +kubebuilder:validation:Optional
-SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
+ // Username on kafka server.
+ // +kubebuilder:validation:Optional
+ SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Username on kafka server.
-// +kubebuilder:validation:Optional
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
+ // Security protocol used to connect to kafka server.
+ // +kubebuilder:validation:Optional
+ SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-// Security protocol used to connect to kafka server.
-// +kubebuilder:validation:Optional
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
-
-// Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
-// +kubebuilder:validation:Optional
-SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
+ // Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+ // +kubebuilder:validation:Optional
+ SessionTimeoutMs *float64 `json:"sessionTimeoutMs,omitempty" tf:"session_timeout_ms,omitempty"`
}
-
type ShardGroupInitParameters struct {
+ // Description of the shard group.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the shard group.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// The name of the shard group, used as cluster name in Distributed tables.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // The name of the shard group, used as cluster name in Distributed tables.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// List of shard names that belong to the shard group.
-ShardNames []*string `json:"shardNames,omitempty" tf:"shard_names,omitempty"`
+ // List of shard names that belong to the shard group.
+ ShardNames []*string `json:"shardNames,omitempty" tf:"shard_names,omitempty"`
}
-
type ShardGroupObservation struct {
+ // Description of the shard group.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the shard group.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// The name of the shard group, used as cluster name in Distributed tables.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // The name of the shard group, used as cluster name in Distributed tables.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// List of shard names that belong to the shard group.
-ShardNames []*string `json:"shardNames,omitempty" tf:"shard_names,omitempty"`
+ // List of shard names that belong to the shard group.
+ ShardNames []*string `json:"shardNames,omitempty" tf:"shard_names,omitempty"`
}
-
type ShardGroupParameters struct {
+ // Description of the shard group.
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the shard group.
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+ // The name of the shard group, used as cluster name in Distributed tables.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name" tf:"name,omitempty"`
-// The name of the shard group, used as cluster name in Distributed tables.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// List of shard names that belong to the shard group.
-// +kubebuilder:validation:Optional
-ShardNames []*string `json:"shardNames" tf:"shard_names,omitempty"`
+ // List of shard names that belong to the shard group.
+ // +kubebuilder:validation:Optional
+ ShardNames []*string `json:"shardNames" tf:"shard_names,omitempty"`
}
-
type ShardInitParameters struct {
+ // The name of shard.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of shard.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // Resources allocated to host of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
+ Resources []ShardResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-// Resources allocated to host of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
-Resources []ShardResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-
-// The weight of shard.
-Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"`
+ // The weight of shard.
+ Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"`
}
-
type ShardObservation struct {
+ // The name of shard.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of shard.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // Resources allocated to host of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
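A hypothetical shard group built from the types above (names invented; Name doubles as the cluster name in Distributed tables):

group := ShardGroupParameters{
	Name:        ptr.To("reporting"),
	Description: ptr.To("shards that serve reporting queries"),
	ShardNames:  []*string{ptr.To("shard1"), ptr.To("shard3")},
}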
+ Resources []ShardResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
-// Resources allocated to host of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
-Resources []ShardResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
-
-// The weight of shard.
-Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"`
+ // The weight of shard.
+ Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"`
}
-
type ShardParameters struct {
+ // The name of shard.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name" tf:"name,omitempty"`
-// The name of shard.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// Resources allocated to host of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Resources []ShardResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+ // Resources allocated to host of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Resources []ShardResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-// The weight of shard.
-// +kubebuilder:validation:Optional
-Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"`
+ // The weight of shard.
+ // +kubebuilder:validation:Optional
+ Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"`
}
-
type ShardResourcesInitParameters struct {
+ // Volume of the storage available to a ZooKeeper host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ShardResourcesObservation struct {
+ // Volume of the storage available to a ZooKeeper host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
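An invented shard definition showing per-shard resources overriding the cluster-level ones, as the comments above describe:

shard := ShardParameters{
	Name:   ptr.To("shard1"),
	Weight: ptr.To(float64(100)),
	Resources: []ShardResourcesParameters{{
		DiskSize:         ptr.To(float64(64)), // gigabytes
		ResourcePresetID: ptr.To("s2.small"),  // hypothetical preset ID
	}},
}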
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ShardResourcesParameters struct {
+ // Volume of the storage available to a ZooKeeper host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type UserInitParameters struct {
+ // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// The password of the user.
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+ // The password of the user.
+ PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// Set of permissions granted to the user. The structure is documented below.
-Permission []PermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+ // Set of permissions granted to the user. The structure is documented below.
+ Permission []PermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
-// Set of user quotas. The structure is documented below.
-Quota []QuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"`
+ // Set of user quotas. The structure is documented below.
+ Quota []QuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"`
-// Custom settings for user. The list is documented below.
-Settings []UserSettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"`
+ // Custom settings for user. The list is documented below.
+ Settings []UserSettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"`
}
-
type UserObservation struct {
+ // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []PermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
+ // Set of permissions granted to the user. The structure is documented below.
+ Permission []PermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
-// Set of user quotas. The structure is documented below.
-Quota []QuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"`
+ // Set of user quotas. The structure is documented below.
+ Quota []QuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"`
-// Custom settings for user. The list is documented below.
-Settings []UserSettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"`
+ // Custom settings for user. The list is documented below.
+ Settings []UserSettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"`
}
-
type UserParameters struct {
+ // The name of the user.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name" tf:"name,omitempty"`
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// The password of the user.
-// +kubebuilder:validation:Optional
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+ // The password of the user.
+ // +kubebuilder:validation:Optional
+ PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// Set of permissions granted to the user. The structure is documented below.
-// +kubebuilder:validation:Optional
-Permission []PermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+ // Set of permissions granted to the user. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Permission []PermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
-// Set of user quotas. The structure is documented below.
-// +kubebuilder:validation:Optional
-Quota []QuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"`
+ // Set of user quotas. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Quota []QuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"`
-// Custom settings for user. The list is documented below.
-// +kubebuilder:validation:Optional
-Settings []UserSettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"`
+ // Custom settings for user. The list is documented below.
+ // +kubebuilder:validation:Optional
+ Settings []UserSettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"`
}
-
type UserSettingsInitParameters struct {
+ // Include CORS headers in HTTP responses.
+ AddHTTPCorsHeader *bool `json:"addHttpCorsHeader,omitempty" tf:"add_http_cors_header,omitempty"`
-// Include CORS headers in HTTP responses.
-AddHTTPCorsHeader *bool `json:"addHttpCorsHeader,omitempty" tf:"add_http_cors_header,omitempty"`
+ // Allows or denies DDL queries.
+ AllowDdl *bool `json:"allowDdl,omitempty" tf:"allow_ddl,omitempty"`
-// Allows or denies DDL queries.
-AllowDdl *bool `json:"allowDdl,omitempty" tf:"allow_ddl,omitempty"`
+ // Enables introspection functions for query profiling.
+ AllowIntrospectionFunctions *bool `json:"allowIntrospectionFunctions,omitempty" tf:"allow_introspection_functions,omitempty"`
-// Enables introspection functions for query profiling.
-AllowIntrospectionFunctions *bool `json:"allowIntrospectionFunctions,omitempty" tf:"allow_introspection_functions,omitempty"`
+ // Allows specifying LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption.
+ AllowSuspiciousLowCardinalityTypes *bool `json:"allowSuspiciousLowCardinalityTypes,omitempty" tf:"allow_suspicious_low_cardinality_types,omitempty"`
-// Allows specifying LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption.
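A sketch of a full user entry; it assumes the file's v1 alias is crossplane-runtime's common API package, whose SecretKeySelector embeds a SecretReference, and every name below is invented:

user := UserParameters{
	Name: ptr.To("reporting"),
	PasswordSecretRef: v1.SecretKeySelector{
		SecretReference: v1.SecretReference{Name: "ch-users", Namespace: "crossplane-system"},
		Key:             "reporting-password",
	},
	Permission: []PermissionParameters{{DatabaseName: ptr.To("analytics")}},
	Quota: []QuotaParameters{{
		IntervalDuration: ptr.To(float64(3600000)),
		Queries:          ptr.To(float64(10000)),
	}},
}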
-AllowSuspiciousLowCardinalityTypes *bool `json:"allowSuspiciousLowCardinalityTypes,omitempty" tf:"allow_suspicious_low_cardinality_types,omitempty"`
+ // Enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations.
+ AnyJoinDistinctRightTableKeys *bool `json:"anyJoinDistinctRightTableKeys,omitempty" tf:"any_join_distinct_right_table_keys,omitempty"`
-// Enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations.
-AnyJoinDistinctRightTableKeys *bool `json:"anyJoinDistinctRightTableKeys,omitempty" tf:"any_join_distinct_right_table_keys,omitempty"`
+ // Enables asynchronous inserts. Disabled by default.
+ AsyncInsert *bool `json:"asyncInsert,omitempty" tf:"async_insert,omitempty"`
-// Enables asynchronous inserts. Disabled by default.
-AsyncInsert *bool `json:"asyncInsert,omitempty" tf:"async_insert,omitempty"`
+ // The maximum timeout in milliseconds since the first INSERT query before inserting collected data. If the parameter is set to 0, the timeout is disabled. Default value: 200.
+ AsyncInsertBusyTimeout *float64 `json:"asyncInsertBusyTimeout,omitempty" tf:"async_insert_busy_timeout,omitempty"`
-// The maximum timeout in milliseconds since the first INSERT query before inserting collected data. If the parameter is set to 0, the timeout is disabled. Default value: 200.
-AsyncInsertBusyTimeout *float64 `json:"asyncInsertBusyTimeout,omitempty" tf:"async_insert_busy_timeout,omitempty"`
+ // The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000.
+ AsyncInsertMaxDataSize *float64 `json:"asyncInsertMaxDataSize,omitempty" tf:"async_insert_max_data_size,omitempty"`
-// The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000.
-AsyncInsertMaxDataSize *float64 `json:"asyncInsertMaxDataSize,omitempty" tf:"async_insert_max_data_size,omitempty"`
+ // The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the setting prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded.
+ AsyncInsertStaleTimeout *float64 `json:"asyncInsertStaleTimeout,omitempty" tf:"async_insert_stale_timeout,omitempty"`
-// The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the setting prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded.
-AsyncInsertStaleTimeout *float64 `json:"asyncInsertStaleTimeout,omitempty" tf:"async_insert_stale_timeout,omitempty"`
+ // The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16.
+ AsyncInsertThreads *float64 `json:"asyncInsertThreads,omitempty" tf:"async_insert_threads,omitempty"`
-// The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16.
-AsyncInsertThreads *float64 `json:"asyncInsertThreads,omitempty" tf:"async_insert_threads,omitempty"`
+ // Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false.
+ CancelHTTPReadonlyQueriesOnClientClose *bool `json:"cancelHttpReadonlyQueriesOnClientClose,omitempty" tf:"cancel_http_readonly_queries_on_client_close,omitempty"`
-// Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false.
-CancelHTTPReadonlyQueriesOnClientClose *bool `json:"cancelHttpReadonlyQueriesOnClientClose,omitempty" tf:"cancel_http_readonly_queries_on_client_close,omitempty"`
+ // Enable compilation of queries.
+ Compile *bool `json:"compile,omitempty" tf:"compile,omitempty"`
-// Enable compilation of queries.
-Compile *bool `json:"compile,omitempty" tf:"compile,omitempty"`
+ // Turn on expression compilation.
+ CompileExpressions *bool `json:"compileExpressions,omitempty" tf:"compile_expressions,omitempty"`
-// Turn on expression compilation.
-CompileExpressions *bool `json:"compileExpressions,omitempty" tf:"compile_expressions,omitempty"`
+ // Connect timeout in milliseconds on the socket used for communicating with the client.
+ ConnectTimeout *float64 `json:"connectTimeout,omitempty" tf:"connect_timeout,omitempty"`
-// Connect timeout in milliseconds on the socket used for communicating with the client.
-ConnectTimeout *float64 `json:"connectTimeout,omitempty" tf:"connect_timeout,omitempty"`
+ // The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50.
+ ConnectTimeoutWithFailover *float64 `json:"connectTimeoutWithFailover,omitempty" tf:"connect_timeout_with_failover,omitempty"`
-// The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50.
-ConnectTimeoutWithFailover *float64 `json:"connectTimeoutWithFailover,omitempty" tf:"connect_timeout_with_failover,omitempty"`
+ // Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
+ CountDistinctImplementation *string `json:"countDistinctImplementation,omitempty" tf:"count_distinct_implementation,omitempty"`
-// Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
-CountDistinctImplementation *string `json:"countDistinctImplementation,omitempty" tf:"count_distinct_implementation,omitempty"`
+ // Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort.
+ DateTimeInputFormat *string `json:"dateTimeInputFormat,omitempty" tf:"date_time_input_format,omitempty"`
-// Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort.
-DateTimeInputFormat *string `json:"dateTimeInputFormat,omitempty" tf:"date_time_input_format,omitempty"`
+ // Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple.
+ DateTimeOutputFormat *string `json:"dateTimeOutputFormat,omitempty" tf:"date_time_output_format,omitempty"`
-// Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple.
-DateTimeOutputFormat *string `json:"dateTimeOutputFormat,omitempty" tf:"date_time_output_format,omitempty"`
+ // Enables or disables the deduplication check for materialized views that receive data from Replicated* tables.
+ DeduplicateBlocksInDependentMaterializedViews *bool `json:"deduplicateBlocksInDependentMaterializedViews,omitempty" tf:"deduplicate_blocks_in_dependent_materialized_views,omitempty"`
-// Enables or disables the deduplication check for materialized views that receive data from Replicated* tables.
-DeduplicateBlocksInDependentMaterializedViews *bool `json:"deduplicateBlocksInDependentMaterializedViews,omitempty" tf:"deduplicate_blocks_in_dependent_materialized_views,omitempty"`
+ // Sets behaviour on overflow when using DISTINCT. Possible values:
+ DistinctOverflowMode *string `json:"distinctOverflowMode,omitempty" tf:"distinct_overflow_mode,omitempty"`
-// Sets behaviour on overflow when using DISTINCT. Possible values:
-DistinctOverflowMode *string `json:"distinctOverflowMode,omitempty" tf:"distinct_overflow_mode,omitempty"`
+ // Determine the behavior of distributed subqueries.
+ DistributedAggregationMemoryEfficient *bool `json:"distributedAggregationMemoryEfficient,omitempty" tf:"distributed_aggregation_memory_efficient,omitempty"`
-// Determine the behavior of distributed subqueries.
-DistributedAggregationMemoryEfficient *bool `json:"distributedAggregationMemoryEfficient,omitempty" tf:"distributed_aggregation_memory_efficient,omitempty"`
+ // Timeout for DDL queries, in milliseconds.
+ DistributedDdlTaskTimeout *float64 `json:"distributedDdlTaskTimeout,omitempty" tf:"distributed_ddl_task_timeout,omitempty"`
-// Timeout for DDL queries, in milliseconds.
-DistributedDdlTaskTimeout *float64 `json:"distributedDdlTaskTimeout,omitempty" tf:"distributed_ddl_task_timeout,omitempty"`
+ // Changes the behaviour of distributed subqueries.
+ DistributedProductMode *string `json:"distributedProductMode,omitempty" tf:"distributed_product_mode,omitempty"`
-// Changes the behaviour of distributed subqueries.
-DistributedProductMode *string `json:"distributedProductMode,omitempty" tf:"distributed_product_mode,omitempty"`
+ // Allows returning an empty result.
+ EmptyResultForAggregationByEmptySet *bool `json:"emptyResultForAggregationByEmptySet,omitempty" tf:"empty_result_for_aggregation_by_empty_set,omitempty"`
-// Allows returning an empty result.
-EmptyResultForAggregationByEmptySet *bool `json:"emptyResultForAggregationByEmptySet,omitempty" tf:"empty_result_for_aggregation_by_empty_set,omitempty"`
+ // Enables or disables data compression in the response to an HTTP request.
+ EnableHTTPCompression *bool `json:"enableHttpCompression,omitempty" tf:"enable_http_compression,omitempty"`
-// Enables or disables data compression in the response to an HTTP request.
-EnableHTTPCompression *bool `json:"enableHttpCompression,omitempty" tf:"enable_http_compression,omitempty"`
+ // Forces a query to an out-of-date replica if updated data is not available.
+ FallbackToStaleReplicasForDistributedQueries *bool `json:"fallbackToStaleReplicasForDistributedQueries,omitempty" tf:"fallback_to_stale_replicas_for_distributed_queries,omitempty"`
-// Forces a query to an out-of-date replica if updated data is not available.
-FallbackToStaleReplicasForDistributedQueries *bool `json:"fallbackToStaleReplicasForDistributedQueries,omitempty" tf:"fallback_to_stale_replicas_for_distributed_queries,omitempty"`
+ // Sets the data format of nested columns.
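A few of the user settings above, populated as an invented example (values are illustrative, not defaults):

settings := UserSettingsInitParameters{
	AddHTTPCorsHeader:     ptr.To(true),
	AllowDdl:              ptr.To(false), // e.g. a read-only account
	AsyncInsert:           ptr.To(true),
	ConnectTimeout:        ptr.To(float64(10000)), // milliseconds
	DateTimeInputFormat:   ptr.To("best_effort"),
	EnableHTTPCompression: ptr.To(true),
}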
+ FlattenNested *bool `json:"flattenNested,omitempty" tf:"flatten_nested,omitempty"`
-// Sets the data format of nested columns.
-FlattenNested *bool `json:"flattenNested,omitempty" tf:"flatten_nested,omitempty"`
+ // Disables query execution if the index can’t be used by date.
+ ForceIndexByDate *bool `json:"forceIndexByDate,omitempty" tf:"force_index_by_date,omitempty"`
-// Disables query execution if the index can’t be used by date.
-ForceIndexByDate *bool `json:"forceIndexByDate,omitempty" tf:"force_index_by_date,omitempty"`
+ // Disables query execution if indexing by the primary key is not possible.
+ ForcePrimaryKey *bool `json:"forcePrimaryKey,omitempty" tf:"force_primary_key,omitempty"`
-// Disables query execution if indexing by the primary key is not possible.
-ForcePrimaryKey *bool `json:"forcePrimaryKey,omitempty" tf:"force_primary_key,omitempty"`
+ // Regular expression (for Regexp format).
+ FormatRegexp *string `json:"formatRegexp,omitempty" tf:"format_regexp,omitempty"`
-// Regular expression (for Regexp format).
-FormatRegexp *string `json:"formatRegexp,omitempty" tf:"format_regexp,omitempty"`
+ // Skip lines unmatched by regular expression.
+ FormatRegexpSkipUnmatched *bool `json:"formatRegexpSkipUnmatched,omitempty" tf:"format_regexp_skip_unmatched,omitempty"`
-// Skip lines unmatched by regular expression.
-FormatRegexpSkipUnmatched *bool `json:"formatRegexpSkipUnmatched,omitempty" tf:"format_regexp_skip_unmatched,omitempty"`
+ // Sets behaviour on overflow while GROUP BY operation. Possible values:
+ GroupByOverflowMode *string `json:"groupByOverflowMode,omitempty" tf:"group_by_overflow_mode,omitempty"`
-// Sets behaviour on overflow while GROUP BY operation. Possible values:
-GroupByOverflowMode *string `json:"groupByOverflowMode,omitempty" tf:"group_by_overflow_mode,omitempty"`
+ // Sets the threshold of the number of keys, after that the two-level aggregation should be used.
+ GroupByTwoLevelThreshold *float64 `json:"groupByTwoLevelThreshold,omitempty" tf:"group_by_two_level_threshold,omitempty"`
-// Sets the threshold of the number of keys, after that the two-level aggregation should be used.
-GroupByTwoLevelThreshold *float64 `json:"groupByTwoLevelThreshold,omitempty" tf:"group_by_two_level_threshold,omitempty"`
+ // Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
+ GroupByTwoLevelThresholdBytes *float64 `json:"groupByTwoLevelThresholdBytes,omitempty" tf:"group_by_two_level_threshold_bytes,omitempty"`
-// Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
-GroupByTwoLevelThresholdBytes *float64 `json:"groupByTwoLevelThresholdBytes,omitempty" tf:"group_by_two_level_threshold_bytes,omitempty"`
+ // Timeout for HTTP connection in milliseconds.
+ HTTPConnectionTimeout *float64 `json:"httpConnectionTimeout,omitempty" tf:"http_connection_timeout,omitempty"`
-// Timeout for HTTP connection in milliseconds.
-HTTPConnectionTimeout *float64 `json:"httpConnectionTimeout,omitempty" tf:"http_connection_timeout,omitempty"`
+ // Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
+ HTTPHeadersProgressInterval *float64 `json:"httpHeadersProgressInterval,omitempty" tf:"http_headers_progress_interval,omitempty"`
-// Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
-HTTPHeadersProgressInterval *float64 `json:"httpHeadersProgressInterval,omitempty" tf:"http_headers_progress_interval,omitempty"` + // Receive timeout for HTTP connection in milliseconds. + HTTPReceiveTimeout *float64 `json:"httpReceiveTimeout,omitempty" tf:"http_receive_timeout,omitempty"` -// Timeout for HTTP connection in milliseconds. -HTTPReceiveTimeout *float64 `json:"httpReceiveTimeout,omitempty" tf:"http_receive_timeout,omitempty"` + // Send timeout for HTTP connection in milliseconds. + HTTPSendTimeout *float64 `json:"httpSendTimeout,omitempty" tf:"http_send_timeout,omitempty"` -// Timeout for HTTP connection in milliseconds. -HTTPSendTimeout *float64 `json:"httpSendTimeout,omitempty" tf:"http_send_timeout,omitempty"` + // Connection timeout for establishing connection with replica for Hedged requests. Default value: 50 milliseconds. + HedgedConnectionTimeoutMs *float64 `json:"hedgedConnectionTimeoutMs,omitempty" tf:"hedged_connection_timeout_ms,omitempty"` -// Connection timeout for establishing connection with replica for Hedged requests. Default value: 50 milliseconds. -HedgedConnectionTimeoutMs *float64 `json:"hedgedConnectionTimeoutMs,omitempty" tf:"hedged_connection_timeout_ms,omitempty"` + // Timeout to close idle TCP connections after specified number of seconds. Default value: 3600 seconds. + IdleConnectionTimeout *float64 `json:"idleConnectionTimeout,omitempty" tf:"idle_connection_timeout,omitempty"` -// Timeout to close idle TCP connections after specified number of seconds. Default value: 3600 seconds. -IdleConnectionTimeout *float64 `json:"idleConnectionTimeout,omitempty" tf:"idle_connection_timeout,omitempty"` + // When performing INSERT queries, replace omitted input column values with default values of the respective columns. + InputFormatDefaultsForOmittedFields *bool `json:"inputFormatDefaultsForOmittedFields,omitempty" tf:"input_format_defaults_for_omitted_fields,omitempty"` -// When performing INSERT queries, replace omitted input column values with default values of the respective columns. -InputFormatDefaultsForOmittedFields *bool `json:"inputFormatDefaultsForOmittedFields,omitempty" tf:"input_format_defaults_for_omitted_fields,omitempty"` + // Enables or disables the insertion of JSON data with nested objects. + InputFormatImportNestedJSON *bool `json:"inputFormatImportNestedJson,omitempty" tf:"input_format_import_nested_json,omitempty"` -// Enables or disables the insertion of JSON data with nested objects. -InputFormatImportNestedJSON *bool `json:"inputFormatImportNestedJson,omitempty" tf:"input_format_import_nested_json,omitempty"` + // Enables or disables the initialization of NULL fields with default values, if data type of these fields is not nullable. + InputFormatNullAsDefault *bool `json:"inputFormatNullAsDefault,omitempty" tf:"input_format_null_as_default,omitempty"` -// Enables or disables the initialization of NULL fields with default values, if data type of these fields is not nullable. -InputFormatNullAsDefault *bool `json:"inputFormatNullAsDefault,omitempty" tf:"input_format_null_as_default,omitempty"` + // Enables or disables order-preserving parallel parsing of data formats. Supported only for TSV, TSKV, CSV and JSONEachRow formats. + InputFormatParallelParsing *bool `json:"inputFormatParallelParsing,omitempty" tf:"input_format_parallel_parsing,omitempty"` -// Enables or disables order-preserving parallel parsing of data formats. Supported only for TSV, TKSV, CSV and JSONEachRow formats.
-InputFormatParallelParsing *bool `json:"inputFormatParallelParsing,omitempty" tf:"input_format_parallel_parsing,omitempty"` + // Enables or disables the full SQL parser if the fast stream parser can’t parse the data. + InputFormatValuesInterpretExpressions *bool `json:"inputFormatValuesInterpretExpressions,omitempty" tf:"input_format_values_interpret_expressions,omitempty"` -// Enables or disables the full SQL parser if the fast stream parser can’t parse the data. -InputFormatValuesInterpretExpressions *bool `json:"inputFormatValuesInterpretExpressions,omitempty" tf:"input_format_values_interpret_expressions,omitempty"` + // Enables or disables checking the column order when inserting data. + InputFormatWithNamesUseHeader *bool `json:"inputFormatWithNamesUseHeader,omitempty" tf:"input_format_with_names_use_header,omitempty"` -// Enables or disables checking the column order when inserting data. -InputFormatWithNamesUseHeader *bool `json:"inputFormatWithNamesUseHeader,omitempty" tf:"input_format_with_names_use_header,omitempty"` + // Sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries. + InsertKeeperMaxRetries *float64 `json:"insertKeeperMaxRetries,omitempty" tf:"insert_keeper_max_retries,omitempty"` -// The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries. -InsertKeeperMaxRetries *float64 `json:"insertKeeperMaxRetries,omitempty" tf:"insert_keeper_max_retries,omitempty"` + // Enables the insertion of default values instead of NULL into columns with a non-nullable data type. Default value: true. + InsertNullAsDefault *bool `json:"insertNullAsDefault,omitempty" tf:"insert_null_as_default,omitempty"` -// Enables the insertion of default values instead of NULL into columns with not nullable data type. Default value: true. -InsertNullAsDefault *bool `json:"insertNullAsDefault,omitempty" tf:"insert_null_as_default,omitempty"` + // Enables quorum writes. + InsertQuorum *float64 `json:"insertQuorum,omitempty" tf:"insert_quorum,omitempty"` -// Enables the quorum writes. -InsertQuorum *float64 `json:"insertQuorum,omitempty" tf:"insert_quorum,omitempty"` + // Enables or disables parallelism for quorum INSERT queries. + InsertQuorumParallel *bool `json:"insertQuorumParallel,omitempty" tf:"insert_quorum_parallel,omitempty"` -// Enables or disables parallelism for quorum INSERT queries. -InsertQuorumParallel *bool `json:"insertQuorumParallel,omitempty" tf:"insert_quorum_parallel,omitempty"` + // Quorum write timeout in milliseconds. + InsertQuorumTimeout *float64 `json:"insertQuorumTimeout,omitempty" tf:"insert_quorum_timeout,omitempty"` -// Write to a quorum timeout in milliseconds. -InsertQuorumTimeout *float64 `json:"insertQuorumTimeout,omitempty" tf:"insert_quorum_timeout,omitempty"` + // Specifies which JOIN algorithm is used. Possible values: + JoinAlgorithm []*string `json:"joinAlgorithm,omitempty" tf:"join_algorithm,omitempty"` -// Specifies which JOIN algorithm is used. Possible values: -JoinAlgorithm []*string `json:"joinAlgorithm,omitempty" tf:"join_algorithm,omitempty"` + // Sets behaviour on overflow in JOIN.
Possible values: + JoinOverflowMode *string `json:"joinOverflowMode,omitempty" tf:"join_overflow_mode,omitempty"` -// Sets behaviour on overflow in JOIN. Possible values: -JoinOverflowMode *string `json:"joinOverflowMode,omitempty" tf:"join_overflow_mode,omitempty"` + // Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting. + JoinUseNulls *bool `json:"joinUseNulls,omitempty" tf:"join_use_nulls,omitempty"` -// Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting. -JoinUseNulls *bool `json:"joinUseNulls,omitempty" tf:"join_use_nulls,omitempty"` + // Requires aliases for subselects and table functions in FROM when more than one table is present. + JoinedSubqueryRequiresAlias *bool `json:"joinedSubqueryRequiresAlias,omitempty" tf:"joined_subquery_requires_alias,omitempty"` -// Require aliases for subselects and table functions in FROM that more than one table is present. -JoinedSubqueryRequiresAlias *bool `json:"joinedSubqueryRequiresAlias,omitempty" tf:"joined_subquery_requires_alias,omitempty"` + // Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random. + LoadBalancing *string `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"` -// Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random. -LoadBalancing *string `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"` + // Method of reading data from local filesystem. Possible values: + LocalFilesystemReadMethod *string `json:"localFilesystemReadMethod,omitempty" tf:"local_filesystem_read_method,omitempty"` -// Method of reading data from local filesystem. Possible values: -LocalFilesystemReadMethod *string `json:"localFilesystemReadMethod,omitempty" tf:"local_filesystem_read_method,omitempty"` + // Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true. + LogQueryThreads *bool `json:"logQueryThreads,omitempty" tf:"log_query_threads,omitempty"` -// Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true. -LogQueryThreads *bool `json:"logQueryThreads,omitempty" tf:"log_query_threads,omitempty"` + // Allows or restricts using the LowCardinality data type with the Native format. + LowCardinalityAllowInNativeFormat *bool `json:"lowCardinalityAllowInNativeFormat,omitempty" tf:"low_cardinality_allow_in_native_format,omitempty"` -// Allows or restricts using the LowCardinality data type with the Native format. -LowCardinalityAllowInNativeFormat *bool `json:"lowCardinalityAllowInNativeFormat,omitempty" tf:"low_cardinality_allow_in_native_format,omitempty"` + // Maximum abstract syntax tree depth. + MaxAstDepth *float64 `json:"maxAstDepth,omitempty" tf:"max_ast_depth,omitempty"` -// Maximum abstract syntax tree depth.
-MaxAstDepth *float64 `json:"maxAstDepth,omitempty" tf:"max_ast_depth,omitempty"` + // Maximum abstract syntax tree elements. + MaxAstElements *float64 `json:"maxAstElements,omitempty" tf:"max_ast_elements,omitempty"` -// Maximum abstract syntax tree elements. -MaxAstElements *float64 `json:"maxAstElements,omitempty" tf:"max_ast_elements,omitempty"` + // A recommendation for what size of the block (in a count of rows) to load from tables. + MaxBlockSize *float64 `json:"maxBlockSize,omitempty" tf:"max_block_size,omitempty"` -// A recommendation for what size of the block (in a count of rows) to load from tables. -MaxBlockSize *float64 `json:"maxBlockSize,omitempty" tf:"max_block_size,omitempty"` + // Limit in bytes for using memory for GROUP BY before using swap on disk. + MaxBytesBeforeExternalGroupBy *float64 `json:"maxBytesBeforeExternalGroupBy,omitempty" tf:"max_bytes_before_external_group_by,omitempty"` -// Limit in bytes for using memoru for GROUP BY before using swap on disk. -MaxBytesBeforeExternalGroupBy *float64 `json:"maxBytesBeforeExternalGroupBy,omitempty" tf:"max_bytes_before_external_group_by,omitempty"` + // This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it is for the sort operation (ORDER BY), not aggregation. + MaxBytesBeforeExternalSort *float64 `json:"maxBytesBeforeExternalSort,omitempty" tf:"max_bytes_before_external_sort,omitempty"` -// This setting is equivalent of the max_bytes_before_external_group_by setting, except for it is for sort operation (ORDER BY), not aggregation. -MaxBytesBeforeExternalSort *float64 `json:"maxBytesBeforeExternalSort,omitempty" tf:"max_bytes_before_external_sort,omitempty"` + // Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT. + MaxBytesInDistinct *float64 `json:"maxBytesInDistinct,omitempty" tf:"max_bytes_in_distinct,omitempty"` -// Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT. -MaxBytesInDistinct *float64 `json:"maxBytesInDistinct,omitempty" tf:"max_bytes_in_distinct,omitempty"` + // Limit on maximum size of the hash table for JOIN, in bytes. + MaxBytesInJoin *float64 `json:"maxBytesInJoin,omitempty" tf:"max_bytes_in_join,omitempty"` -// Limit on maximum size of the hash table for JOIN, in bytes. -MaxBytesInJoin *float64 `json:"maxBytesInJoin,omitempty" tf:"max_bytes_in_join,omitempty"` + // Limit on the number of bytes in the set resulting from the execution of the IN section. + MaxBytesInSet *float64 `json:"maxBytesInSet,omitempty" tf:"max_bytes_in_set,omitempty"` -// Limit on the number of bytes in the set resulting from the execution of the IN section. -MaxBytesInSet *float64 `json:"maxBytesInSet,omitempty" tf:"max_bytes_in_set,omitempty"` + // Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query. + MaxBytesToRead *float64 `json:"maxBytesToRead,omitempty" tf:"max_bytes_to_read,omitempty"` -// Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query. -MaxBytesToRead *float64 `json:"maxBytesToRead,omitempty" tf:"max_bytes_to_read,omitempty"` + // Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting. + MaxBytesToSort *float64 `json:"maxBytesToSort,omitempty" tf:"max_bytes_to_sort,omitempty"` -// Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
-MaxBytesToSort *float64 `json:"maxBytesToSort,omitempty" tf:"max_bytes_to_sort,omitempty"` + // Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. + MaxBytesToTransfer *float64 `json:"maxBytesToTransfer,omitempty" tf:"max_bytes_to_transfer,omitempty"` -// Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. -MaxBytesToTransfer *float64 `json:"maxBytesToTransfer,omitempty" tf:"max_bytes_to_transfer,omitempty"` + // Limits the maximum number of columns that can be read from a table in a single query. + MaxColumnsToRead *float64 `json:"maxColumnsToRead,omitempty" tf:"max_columns_to_read,omitempty"` -// Limits the maximum number of columns that can be read from a table in a single query. -MaxColumnsToRead *float64 `json:"maxColumnsToRead,omitempty" tf:"max_columns_to_read,omitempty"` + // The maximum number of concurrent requests per user. Default value: 0 (no limit). + MaxConcurrentQueriesForUser *float64 `json:"maxConcurrentQueriesForUser,omitempty" tf:"max_concurrent_queries_for_user,omitempty"` -// The maximum number of concurrent requests per user. Default value: 0 (no limit). -MaxConcurrentQueriesForUser *float64 `json:"maxConcurrentQueriesForUser,omitempty" tf:"max_concurrent_queries_for_user,omitempty"` + // Limits the maximum query execution time in milliseconds. + MaxExecutionTime *float64 `json:"maxExecutionTime,omitempty" tf:"max_execution_time,omitempty"` -// Limits the maximum query execution time in milliseconds. -MaxExecutionTime *float64 `json:"maxExecutionTime,omitempty" tf:"max_execution_time,omitempty"` + // Maximum abstract syntax tree depth after expansion of aliases. + MaxExpandedAstElements *float64 `json:"maxExpandedAstElements,omitempty" tf:"max_expanded_ast_elements,omitempty"` -// Maximum abstract syntax tree depth after after expansion of aliases. -MaxExpandedAstElements *float64 `json:"maxExpandedAstElements,omitempty" tf:"max_expanded_ast_elements,omitempty"` + // Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier. + MaxFinalThreads *float64 `json:"maxFinalThreads,omitempty" tf:"max_final_threads,omitempty"` -// Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier. -MaxFinalThreads *float64 `json:"maxFinalThreads,omitempty" tf:"max_final_threads,omitempty"` + // Limits the maximum number of HTTP GET redirect hops for URL-engine tables. + MaxHTTPGetRedirects *float64 `json:"maxHttpGetRedirects,omitempty" tf:"max_http_get_redirects,omitempty"` -// Limits the maximum number of HTTP GET redirect hops for URL-engine tables. -MaxHTTPGetRedirects *float64 `json:"maxHttpGetRedirects,omitempty" tf:"max_http_get_redirects,omitempty"` + // The size of blocks (in a count of rows) to form for insertion into a table. + MaxInsertBlockSize *float64 `json:"maxInsertBlockSize,omitempty" tf:"max_insert_block_size,omitempty"` -// The size of blocks (in a count of rows) to form for insertion into a table. -MaxInsertBlockSize *float64 `json:"maxInsertBlockSize,omitempty" tf:"max_insert_block_size,omitempty"` + // The maximum number of threads to execute the INSERT SELECT query. Default value: 0. + MaxInsertThreads *float64 `json:"maxInsertThreads,omitempty" tf:"max_insert_threads,omitempty"` -// The maximum number of threads to execute the INSERT SELECT query. Default value: 0.
-MaxInsertThreads *float64 `json:"maxInsertThreads,omitempty" tf:"max_insert_threads,omitempty"` + // Limits the maximum memory usage (in bytes) for processing queries on a single server. + MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"` -// Limits the maximum memory usage (in bytes) for processing queries on a single server. -MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"` + // Limits the maximum memory usage (in bytes) for processing of user's queries on a single server. + MaxMemoryUsageForUser *float64 `json:"maxMemoryUsageForUser,omitempty" tf:"max_memory_usage_for_user,omitempty"` -// Limits the maximum memory usage (in bytes) for processing of user's queries on a single server. -MaxMemoryUsageForUser *float64 `json:"maxMemoryUsageForUser,omitempty" tf:"max_memory_usage_for_user,omitempty"` + // Limits the speed of the data exchange over the network in bytes per second. + MaxNetworkBandwidth *float64 `json:"maxNetworkBandwidth,omitempty" tf:"max_network_bandwidth,omitempty"` -// Limits the speed of the data exchange over the network in bytes per second. -MaxNetworkBandwidth *float64 `json:"maxNetworkBandwidth,omitempty" tf:"max_network_bandwidth,omitempty"` + // Limits the speed of the data exchange over the network in bytes per second. + MaxNetworkBandwidthForUser *float64 `json:"maxNetworkBandwidthForUser,omitempty" tf:"max_network_bandwidth_for_user,omitempty"` -// Limits the speed of the data exchange over the network in bytes per second. -MaxNetworkBandwidthForUser *float64 `json:"maxNetworkBandwidthForUser,omitempty" tf:"max_network_bandwidth_for_user,omitempty"` + // Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited. + MaxParserDepth *float64 `json:"maxParserDepth,omitempty" tf:"max_parser_depth,omitempty"` -// Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited. -MaxParserDepth *float64 `json:"maxParserDepth,omitempty" tf:"max_parser_depth,omitempty"` + // The maximum part of a query that can be taken to RAM for parsing with the SQL parser. + MaxQuerySize *float64 `json:"maxQuerySize,omitempty" tf:"max_query_size,omitempty"` -// The maximum part of a query that can be taken to RAM for parsing with the SQL parser. -MaxQuerySize *float64 `json:"maxQuerySize,omitempty" tf:"max_query_size,omitempty"` + // The maximum size of the buffer to read from the filesystem. + MaxReadBufferSize *float64 `json:"maxReadBufferSize,omitempty" tf:"max_read_buffer_size,omitempty"` -// The maximum size of the buffer to read from the filesystem. -MaxReadBufferSize *float64 `json:"maxReadBufferSize,omitempty" tf:"max_read_buffer_size,omitempty"` + // Disables lagging replicas for distributed queries. + MaxReplicaDelayForDistributedQueries *float64 `json:"maxReplicaDelayForDistributedQueries,omitempty" tf:"max_replica_delay_for_distributed_queries,omitempty"` -// Disables lagging replicas for distributed queries. -MaxReplicaDelayForDistributedQueries *float64 `json:"maxReplicaDelayForDistributedQueries,omitempty" tf:"max_replica_delay_for_distributed_queries,omitempty"` + // Limits the number of bytes in the result. + MaxResultBytes *float64 `json:"maxResultBytes,omitempty" tf:"max_result_bytes,omitempty"` -// Limits the number of bytes in the result. 
-MaxResultBytes *float64 `json:"maxResultBytes,omitempty" tf:"max_result_bytes,omitempty"` + // Limits the number of rows in the result. + MaxResultRows *float64 `json:"maxResultRows,omitempty" tf:"max_result_rows,omitempty"` -// Limits the number of rows in the result. -MaxResultRows *float64 `json:"maxResultRows,omitempty" tf:"max_result_rows,omitempty"` + // Limits the maximum number of different rows when using DISTINCT. + MaxRowsInDistinct *float64 `json:"maxRowsInDistinct,omitempty" tf:"max_rows_in_distinct,omitempty"` -// Limits the maximum number of different rows when using DISTINCT. -MaxRowsInDistinct *float64 `json:"maxRowsInDistinct,omitempty" tf:"max_rows_in_distinct,omitempty"` + // Limit on maximum size of the hash table for JOIN, in rows. + MaxRowsInJoin *float64 `json:"maxRowsInJoin,omitempty" tf:"max_rows_in_join,omitempty"` -// Limit on maximum size of the hash table for JOIN, in rows. -MaxRowsInJoin *float64 `json:"maxRowsInJoin,omitempty" tf:"max_rows_in_join,omitempty"` + // Limit on the number of rows in the set resulting from the execution of the IN section. + MaxRowsInSet *float64 `json:"maxRowsInSet,omitempty" tf:"max_rows_in_set,omitempty"` -// Limit on the number of rows in the set resulting from the execution of the IN section. -MaxRowsInSet *float64 `json:"maxRowsInSet,omitempty" tf:"max_rows_in_set,omitempty"` + // Limits the maximum number of unique keys received from aggregation function. + MaxRowsToGroupBy *float64 `json:"maxRowsToGroupBy,omitempty" tf:"max_rows_to_group_by,omitempty"` -// Limits the maximum number of unique keys received from aggregation function. -MaxRowsToGroupBy *float64 `json:"maxRowsToGroupBy,omitempty" tf:"max_rows_to_group_by,omitempty"` + // Limits the maximum number of rows that can be read from a table when running a query. + MaxRowsToRead *float64 `json:"maxRowsToRead,omitempty" tf:"max_rows_to_read,omitempty"` -// Limits the maximum number of rows that can be read from a table when running a query. -MaxRowsToRead *float64 `json:"maxRowsToRead,omitempty" tf:"max_rows_to_read,omitempty"` + // Limits the maximum number of rows that can be read from a table for sorting. + MaxRowsToSort *float64 `json:"maxRowsToSort,omitempty" tf:"max_rows_to_sort,omitempty"` -// Limits the maximum number of rows that can be read from a table for sorting. -MaxRowsToSort *float64 `json:"maxRowsToSort,omitempty" tf:"max_rows_to_sort,omitempty"` + // Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. + MaxRowsToTransfer *float64 `json:"maxRowsToTransfer,omitempty" tf:"max_rows_to_transfer,omitempty"` -// Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. -MaxRowsToTransfer *float64 `json:"maxRowsToTransfer,omitempty" tf:"max_rows_to_transfer,omitempty"` + // Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns. + MaxTemporaryColumns *float64 `json:"maxTemporaryColumns,omitempty" tf:"max_temporary_columns,omitempty"` -// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns. -MaxTemporaryColumns *float64 `json:"maxTemporaryColumns,omitempty" tf:"max_temporary_columns,omitempty"` + // The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited. 
+ MaxTemporaryDataOnDiskSizeForQuery *float64 `json:"maxTemporaryDataOnDiskSizeForQuery,omitempty" tf:"max_temporary_data_on_disk_size_for_query,omitempty"` -// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited. -MaxTemporaryDataOnDiskSizeForQuery *float64 `json:"maxTemporaryDataOnDiskSizeForQuery,omitempty" tf:"max_temporary_data_on_disk_size_for_query,omitempty"` + // The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited. + MaxTemporaryDataOnDiskSizeForUser *float64 `json:"maxTemporaryDataOnDiskSizeForUser,omitempty" tf:"max_temporary_data_on_disk_size_for_user,omitempty"` -// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited. -MaxTemporaryDataOnDiskSizeForUser *float64 `json:"maxTemporaryDataOnDiskSizeForUser,omitempty" tf:"max_temporary_data_on_disk_size_for_user,omitempty"` + // Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns. + MaxTemporaryNonConstColumns *float64 `json:"maxTemporaryNonConstColumns,omitempty" tf:"max_temporary_non_const_columns,omitempty"` -// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns. -MaxTemporaryNonConstColumns *float64 `json:"maxTemporaryNonConstColumns,omitempty" tf:"max_temporary_non_const_columns,omitempty"` + // The maximum number of query processing threads, excluding threads for retrieving data from remote servers. + MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` -// The maximum number of query processing threads, excluding threads for retrieving data from remote servers. -MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` + // It represents the soft memory limit when the hard limit is reached on the user level. This value is used to compute overcommit ratio for the query. Zero means skip the query. + MemoryOvercommitRatioDenominator *float64 `json:"memoryOvercommitRatioDenominator,omitempty" tf:"memory_overcommit_ratio_denominator,omitempty"` -// It represents soft memory limit in case when hard limit is reached on user level. This value is used to compute overcommit ratio for the query. Zero means skip the query. -MemoryOvercommitRatioDenominator *float64 `json:"memoryOvercommitRatioDenominator,omitempty" tf:"memory_overcommit_ratio_denominator,omitempty"` + // It represents the soft memory limit when the hard limit is reached on the global level. This value is used to compute overcommit ratio for the query. Zero means skip the query. + MemoryOvercommitRatioDenominatorForUser *float64 `json:"memoryOvercommitRatioDenominatorForUser,omitempty" tf:"memory_overcommit_ratio_denominator_for_user,omitempty"` -// It represents soft memory limit in case when hard limit is reached on global level. This value is used to compute overcommit ratio for the query. Zero means skip the query. -MemoryOvercommitRatioDenominatorForUser *float64 `json:"memoryOvercommitRatioDenominatorForUser,omitempty" tf:"memory_overcommit_ratio_denominator_for_user,omitempty"` + // Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless of the size of the allocation. Possible values: from 0 to 1. Default: 0.
+ MemoryProfilerSampleProbability *float64 `json:"memoryProfilerSampleProbability,omitempty" tf:"memory_profiler_sample_probability,omitempty"` -// Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Possible values: from 0 to 1. Default: 0. -MemoryProfilerSampleProbability *float64 `json:"memoryProfilerSampleProbability,omitempty" tf:"memory_profiler_sample_probability,omitempty"` + // Memory profiler step (in bytes). If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. Default value: 4194304 (4 MB). Zero means disabled memory profiler. + MemoryProfilerStep *float64 `json:"memoryProfilerStep,omitempty" tf:"memory_profiler_step,omitempty"` -// Memory profiler step (in bytes). If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. Default value: 4194304 (4 MB). Zero means disabled memory profiler. -MemoryProfilerStep *float64 `json:"memoryProfilerStep,omitempty" tf:"memory_profiler_step,omitempty"` + // Maximum time a thread will wait for memory to be freed in the case of memory overcommit on a user level. If the timeout is reached and memory is not freed, an exception is thrown. + MemoryUsageOvercommitMaxWaitMicroseconds *float64 `json:"memoryUsageOvercommitMaxWaitMicroseconds,omitempty" tf:"memory_usage_overcommit_max_wait_microseconds,omitempty"` -// Maximum time thread will wait for memory to be freed in the case of memory overcommit on a user level. If the timeout is reached and memory is not freed, an exception is thrown. -MemoryUsageOvercommitMaxWaitMicroseconds *float64 `json:"memoryUsageOvercommitMaxWaitMicroseconds,omitempty" tf:"memory_usage_overcommit_max_wait_microseconds,omitempty"` + // If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks. + MergeTreeMaxBytesToUseCache *float64 `json:"mergeTreeMaxBytesToUseCache,omitempty" tf:"merge_tree_max_bytes_to_use_cache,omitempty"` -// If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks. -MergeTreeMaxBytesToUseCache *float64 `json:"mergeTreeMaxBytesToUseCache,omitempty" tf:"merge_tree_max_bytes_to_use_cache,omitempty"` + // If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks. + MergeTreeMaxRowsToUseCache *float64 `json:"mergeTreeMaxRowsToUseCache,omitempty" tf:"merge_tree_max_rows_to_use_cache,omitempty"` -// If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks. -MergeTreeMaxRowsToUseCache *float64 `json:"mergeTreeMaxRowsToUseCache,omitempty" tf:"merge_tree_max_rows_to_use_cache,omitempty"` + // If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
+ MergeTreeMinBytesForConcurrentRead *float64 `json:"mergeTreeMinBytesForConcurrentRead,omitempty" tf:"merge_tree_min_bytes_for_concurrent_read,omitempty"` -// If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads. -MergeTreeMinBytesForConcurrentRead *float64 `json:"mergeTreeMinBytesForConcurrentRead,omitempty" tf:"merge_tree_min_bytes_for_concurrent_read,omitempty"` + // If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads. + MergeTreeMinRowsForConcurrentRead *float64 `json:"mergeTreeMinRowsForConcurrentRead,omitempty" tf:"merge_tree_min_rows_for_concurrent_read,omitempty"` -// If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads. -MergeTreeMinRowsForConcurrentRead *float64 `json:"mergeTreeMinRowsForConcurrentRead,omitempty" tf:"merge_tree_min_rows_for_concurrent_read,omitempty"` + // The minimum data volume required for using direct I/O access to the storage disk. + MinBytesToUseDirectIo *float64 `json:"minBytesToUseDirectIo,omitempty" tf:"min_bytes_to_use_direct_io,omitempty"` -// The minimum data volume required for using direct I/O access to the storage disk. -MinBytesToUseDirectIo *float64 `json:"minBytesToUseDirectIo,omitempty" tf:"min_bytes_to_use_direct_io,omitempty"` + // How many times to potentially use a compiled chunk of code before running compilation. + MinCountToCompile *float64 `json:"minCountToCompile,omitempty" tf:"min_count_to_compile,omitempty"` -// How many times to potentially use a compiled chunk of code before running compilation. -MinCountToCompile *float64 `json:"minCountToCompile,omitempty" tf:"min_count_to_compile,omitempty"` + // A query waits for expression compilation process to complete prior to continuing execution. + MinCountToCompileExpression *float64 `json:"minCountToCompileExpression,omitempty" tf:"min_count_to_compile_expression,omitempty"` -// A query waits for expression compilation process to complete prior to continuing execution. -MinCountToCompileExpression *float64 `json:"minCountToCompileExpression,omitempty" tf:"min_count_to_compile_expression,omitempty"` + // Minimal execution speed in rows per second. + MinExecutionSpeed *float64 `json:"minExecutionSpeed,omitempty" tf:"min_execution_speed,omitempty"` -// Minimal execution speed in rows per second. -MinExecutionSpeed *float64 `json:"minExecutionSpeed,omitempty" tf:"min_execution_speed,omitempty"` + // Minimal execution speed in bytes per second. + MinExecutionSpeedBytes *float64 `json:"minExecutionSpeedBytes,omitempty" tf:"min_execution_speed_bytes,omitempty"` -// Minimal execution speed in bytes per second. -MinExecutionSpeedBytes *float64 `json:"minExecutionSpeedBytes,omitempty" tf:"min_execution_speed_bytes,omitempty"` + // Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query. + MinInsertBlockSizeBytes *float64 `json:"minInsertBlockSizeBytes,omitempty" tf:"min_insert_block_size_bytes,omitempty"` -// Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query. 
-MinInsertBlockSizeBytes *float64 `json:"minInsertBlockSizeBytes,omitempty" tf:"min_insert_block_size_bytes,omitempty"` + // Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query. + MinInsertBlockSizeRows *float64 `json:"minInsertBlockSizeRows,omitempty" tf:"min_insert_block_size_rows,omitempty"` -// Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query. -MinInsertBlockSizeRows *float64 `json:"minInsertBlockSizeRows,omitempty" tf:"min_insert_block_size_rows,omitempty"` + // If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes. + OutputFormatJSONQuote64BitIntegers *bool `json:"outputFormatJsonQuote64BitIntegers,omitempty" tf:"output_format_json_quote_64bit_integers,omitempty"` -// If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes. -OutputFormatJSONQuote64BitIntegers *bool `json:"outputFormatJsonQuote64BitIntegers,omitempty" tf:"output_format_json_quote_64bit_integers,omitempty"` + // Enables +nan, -nan, +inf, -inf outputs in JSON output format. + OutputFormatJSONQuoteDenormals *bool `json:"outputFormatJsonQuoteDenormals,omitempty" tf:"output_format_json_quote_denormals,omitempty"` -// Enables +nan, -nan, +inf, -inf outputs in JSON output format. -OutputFormatJSONQuoteDenormals *bool `json:"outputFormatJsonQuoteDenormals,omitempty" tf:"output_format_json_quote_denormals,omitempty"` + // Enables/disables preferential use of the localhost replica when processing distributed queries. Default value: true. + PreferLocalhostReplica *bool `json:"preferLocalhostReplica,omitempty" tf:"prefer_localhost_replica,omitempty"` -// Enables/disables preferable using the localhost replica when processing distributed queries. Default value: true. -PreferLocalhostReplica *bool `json:"preferLocalhostReplica,omitempty" tf:"prefer_localhost_replica,omitempty"` + // Query priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// Query priority. -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Quota accounting mode. + QuotaMode *string `json:"quotaMode,omitempty" tf:"quota_mode,omitempty"` -// Quota accounting mode. -QuotaMode *string `json:"quotaMode,omitempty" tf:"quota_mode,omitempty"` + // Sets behaviour on overflow while reading. Possible values: + ReadOverflowMode *string `json:"readOverflowMode,omitempty" tf:"read_overflow_mode,omitempty"` -// Sets behaviour on overflow while read. Possible values: -ReadOverflowMode *string `json:"readOverflowMode,omitempty" tf:"read_overflow_mode,omitempty"` + // Restricts permissions for queries that read data, write data, or change settings. + Readonly *float64 `json:"readonly,omitempty" tf:"readonly,omitempty"` -// Restricts permissions for reading data, write data and change settings queries. -Readonly *float64 `json:"readonly,omitempty" tf:"readonly,omitempty"` + // Receive timeout in milliseconds on the socket used for communicating with the client. + ReceiveTimeout *float64 `json:"receiveTimeout,omitempty" tf:"receive_timeout,omitempty"` -// Receive timeout in milliseconds on the socket used for communicating with the client.
-ReceiveTimeout *float64 `json:"receiveTimeout,omitempty" tf:"receive_timeout,omitempty"` + // Method of reading data from remote filesystem, one of: read, threadpool. + RemoteFilesystemReadMethod *string `json:"remoteFilesystemReadMethod,omitempty" tf:"remote_filesystem_read_method,omitempty"` -// Method of reading data from remote filesystem, one of: read, threadpool. -RemoteFilesystemReadMethod *string `json:"remoteFilesystemReadMethod,omitempty" tf:"remote_filesystem_read_method,omitempty"` + // For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting. + ReplicationAlterPartitionsSync *float64 `json:"replicationAlterPartitionsSync,omitempty" tf:"replication_alter_partitions_sync,omitempty"` -// For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting. -ReplicationAlterPartitionsSync *float64 `json:"replicationAlterPartitionsSync,omitempty" tf:"replication_alter_partitions_sync,omitempty"` + // Sets behaviour on overflow in result. Possible values: + ResultOverflowMode *string `json:"resultOverflowMode,omitempty" tf:"result_overflow_mode,omitempty"` -// Sets behaviour on overflow in result. Possible values: -ResultOverflowMode *string `json:"resultOverflowMode,omitempty" tf:"result_overflow_mode,omitempty"` + // Enables or disables sequential consistency for SELECT queries. + SelectSequentialConsistency *bool `json:"selectSequentialConsistency,omitempty" tf:"select_sequential_consistency,omitempty"` -// Enables or disables sequential consistency for SELECT queries. -SelectSequentialConsistency *bool `json:"selectSequentialConsistency,omitempty" tf:"select_sequential_consistency,omitempty"` + // Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses. + SendProgressInHTTPHeaders *bool `json:"sendProgressInHttpHeaders,omitempty" tf:"send_progress_in_http_headers,omitempty"` -// Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses. -SendProgressInHTTPHeaders *bool `json:"sendProgressInHttpHeaders,omitempty" tf:"send_progress_in_http_headers,omitempty"` + // Send timeout in milliseconds on the socket used for communicating with the client. + SendTimeout *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"` -// Send timeout in milliseconds on the socket used for communicating with the client. -SendTimeout *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"` + // Sets behaviour on overflow in the resulting set. Possible values: + SetOverflowMode *string `json:"setOverflowMode,omitempty" tf:"set_overflow_mode,omitempty"` -// Sets behaviour on overflow in the set resulting. Possible values: -SetOverflowMode *string `json:"setOverflowMode,omitempty" tf:"set_overflow_mode,omitempty"` + // Enables or disables silent skipping of unavailable shards. + SkipUnavailableShards *bool `json:"skipUnavailableShards,omitempty" tf:"skip_unavailable_shards,omitempty"` -// Enables or disables silently skipping of unavailable shards. -SkipUnavailableShards *bool `json:"skipUnavailableShards,omitempty" tf:"skip_unavailable_shards,omitempty"` + // Sets behaviour on overflow while sorting. Possible values: + SortOverflowMode *string `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"` -// Sets behaviour on overflow while sort.
Possible values: -SortOverflowMode *string `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"` + // Timeout (in seconds) between checks of execution speed. It is checked that the execution speed is not less than specified in the min_execution_speed parameter. Must be at least 1000. + TimeoutBeforeCheckingExecutionSpeed *float64 `json:"timeoutBeforeCheckingExecutionSpeed,omitempty" tf:"timeout_before_checking_execution_speed,omitempty"` -// Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less that specified in min_execution_speed parameter. Must be at least 1000. -TimeoutBeforeCheckingExecutionSpeed *float64 `json:"timeoutBeforeCheckingExecutionSpeed,omitempty" tf:"timeout_before_checking_execution_speed,omitempty"` + // Sets behaviour on overflow. Possible values: + TimeoutOverflowMode *string `json:"timeoutOverflowMode,omitempty" tf:"timeout_overflow_mode,omitempty"` -// Sets behaviour on overflow. Possible values: -TimeoutOverflowMode *string `json:"timeoutOverflowMode,omitempty" tf:"timeout_overflow_mode,omitempty"` + // Sets behaviour on overflow. Possible values: + TransferOverflowMode *string `json:"transferOverflowMode,omitempty" tf:"transfer_overflow_mode,omitempty"` -// Sets behaviour on overflow. Possible values: -TransferOverflowMode *string `json:"transferOverflowMode,omitempty" tf:"transfer_overflow_mode,omitempty"` + // Enables equality of NULL values for the IN operator. + TransformNullIn *bool `json:"transformNullIn,omitempty" tf:"transform_null_in,omitempty"` -// Enables equality of NULL values for IN operator. -TransformNullIn *bool `json:"transformNullIn,omitempty" tf:"transform_null_in,omitempty"` + // Enables hedged requests logic for remote queries. It allows establishing many connections with different replicas for a query. A new connection is opened if the existing connection(s) with replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. The query uses the first connection that sends a non-empty progress packet (or a data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. Queries with max_parallel_replicas > 1 are supported. Default value: true. + UseHedgedRequests *bool `json:"useHedgedRequests,omitempty" tf:"use_hedged_requests,omitempty"` -// Enables hedged requests logic for remote queries. It allows to establish many connections with different replicas for query. New connection is enabled in case existent connection(s) with replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. Query uses the first connection which send non empty progress packet (or data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. Queries with max_parallel_replicas > 1 are supported. Default value: true. -UseHedgedRequests *bool `json:"useHedgedRequests,omitempty" tf:"use_hedged_requests,omitempty"` + // Whether to use a cache of uncompressed blocks. + UseUncompressedCache *bool `json:"useUncompressedCache,omitempty" tf:"use_uncompressed_cache,omitempty"` -// Whether to use a cache of uncompressed blocks. -UseUncompressedCache *bool `json:"useUncompressedCache,omitempty" tf:"use_uncompressed_cache,omitempty"` + // Enables waiting for processing of asynchronous insertion. If enabled, the server returns OK only after the data is inserted.
+ WaitForAsyncInsert *bool `json:"waitForAsyncInsert,omitempty" tf:"wait_for_async_insert,omitempty"` -// Enables waiting for processing of asynchronous insertion. If enabled, server returns OK only after the data is inserted. -WaitForAsyncInsert *bool `json:"waitForAsyncInsert,omitempty" tf:"wait_for_async_insert,omitempty"` - -// The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second). -WaitForAsyncInsertTimeout *float64 `json:"waitForAsyncInsertTimeout,omitempty" tf:"wait_for_async_insert_timeout,omitempty"` + // The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second). + WaitForAsyncInsertTimeout *float64 `json:"waitForAsyncInsertTimeout,omitempty" tf:"wait_for_async_insert_timeout,omitempty"` } - type UserSettingsObservation struct { + // Include CORS headers in HTTP responses. + AddHTTPCorsHeader *bool `json:"addHttpCorsHeader,omitempty" tf:"add_http_cors_header,omitempty"` -// Include CORS headers in HTTP responces. -AddHTTPCorsHeader *bool `json:"addHttpCorsHeader,omitempty" tf:"add_http_cors_header,omitempty"` - -// Allows or denies DDL queries. -AllowDdl *bool `json:"allowDdl,omitempty" tf:"allow_ddl,omitempty"` + // Allows or denies DDL queries. + AllowDdl *bool `json:"allowDdl,omitempty" tf:"allow_ddl,omitempty"` -// Enables introspections functions for query profiling. -AllowIntrospectionFunctions *bool `json:"allowIntrospectionFunctions,omitempty" tf:"allow_introspection_functions,omitempty"` + // Enables introspection functions for query profiling. + AllowIntrospectionFunctions *bool `json:"allowIntrospectionFunctions,omitempty" tf:"allow_introspection_functions,omitempty"` -// Allows specifying LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption. -AllowSuspiciousLowCardinalityTypes *bool `json:"allowSuspiciousLowCardinalityTypes,omitempty" tf:"allow_suspicious_low_cardinality_types,omitempty"` + // Allows specifying LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption. + AllowSuspiciousLowCardinalityTypes *bool `json:"allowSuspiciousLowCardinalityTypes,omitempty" tf:"allow_suspicious_low_cardinality_types,omitempty"` -// enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations. -AnyJoinDistinctRightTableKeys *bool `json:"anyJoinDistinctRightTableKeys,omitempty" tf:"any_join_distinct_right_table_keys,omitempty"` + // Enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations. + AnyJoinDistinctRightTableKeys *bool `json:"anyJoinDistinctRightTableKeys,omitempty" tf:"any_join_distinct_right_table_keys,omitempty"` -// Enables asynchronous inserts. Disabled by default. -AsyncInsert *bool `json:"asyncInsert,omitempty" tf:"async_insert,omitempty"` + // Enables asynchronous inserts. Disabled by default. + AsyncInsert *bool `json:"asyncInsert,omitempty" tf:"async_insert,omitempty"` -// The maximum timeout in milliseconds since the first INSERT query before inserting collected data. If the parameter is set to 0, the timeout is disabled. Default value: 200. -AsyncInsertBusyTimeout *float64 `json:"asyncInsertBusyTimeout,omitempty" tf:"async_insert_busy_timeout,omitempty"` + // The maximum timeout in milliseconds since the first INSERT query before inserting collected data.
If the parameter is set to 0, the timeout is disabled. Default value: 200. + AsyncInsertBusyTimeout *float64 `json:"asyncInsertBusyTimeout,omitempty" tf:"async_insert_busy_timeout,omitempty"` -// The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000. -AsyncInsertMaxDataSize *float64 `json:"asyncInsertMaxDataSize,omitempty" tf:"async_insert_max_data_size,omitempty"` + // The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000. + AsyncInsertMaxDataSize *float64 `json:"asyncInsertMaxDataSize,omitempty" tf:"async_insert_max_data_size,omitempty"` -// The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the settings prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded. -AsyncInsertStaleTimeout *float64 `json:"asyncInsertStaleTimeout,omitempty" tf:"async_insert_stale_timeout,omitempty"` + // The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the setting prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded. + AsyncInsertStaleTimeout *float64 `json:"asyncInsertStaleTimeout,omitempty" tf:"async_insert_stale_timeout,omitempty"` -// The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16. -AsyncInsertThreads *float64 `json:"asyncInsertThreads,omitempty" tf:"async_insert_threads,omitempty"` + // The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16. + AsyncInsertThreads *float64 `json:"asyncInsertThreads,omitempty" tf:"async_insert_threads,omitempty"` -// Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false. -CancelHTTPReadonlyQueriesOnClientClose *bool `json:"cancelHttpReadonlyQueriesOnClientClose,omitempty" tf:"cancel_http_readonly_queries_on_client_close,omitempty"` + // Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false. + CancelHTTPReadonlyQueriesOnClientClose *bool `json:"cancelHttpReadonlyQueriesOnClientClose,omitempty" tf:"cancel_http_readonly_queries_on_client_close,omitempty"` -// Enable compilation of queries. -Compile *bool `json:"compile,omitempty" tf:"compile,omitempty"` + // Enables compilation of queries. + Compile *bool `json:"compile,omitempty" tf:"compile,omitempty"` -// Turn on expression compilation. -CompileExpressions *bool `json:"compileExpressions,omitempty" tf:"compile_expressions,omitempty"` + // Turns on expression compilation. + CompileExpressions *bool `json:"compileExpressions,omitempty" tf:"compile_expressions,omitempty"` -// Connect timeout in milliseconds on the socket used for communicating with the client. -ConnectTimeout *float64 `json:"connectTimeout,omitempty" tf:"connect_timeout,omitempty"` + // Connect timeout in milliseconds on the socket used for communicating with the client.
+ ConnectTimeout *float64 `json:"connectTimeout,omitempty" tf:"connect_timeout,omitempty"` -// The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50. -ConnectTimeoutWithFailover *float64 `json:"connectTimeoutWithFailover,omitempty" tf:"connect_timeout_with_failover,omitempty"` + // The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50. + ConnectTimeoutWithFailover *float64 `json:"connectTimeoutWithFailover,omitempty" tf:"connect_timeout_with_failover,omitempty"` -// Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction. -CountDistinctImplementation *string `json:"countDistinctImplementation,omitempty" tf:"count_distinct_implementation,omitempty"` + // Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction. + CountDistinctImplementation *string `json:"countDistinctImplementation,omitempty" tf:"count_distinct_implementation,omitempty"` -// Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort. -DateTimeInputFormat *string `json:"dateTimeInputFormat,omitempty" tf:"date_time_input_format,omitempty"` + // Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort. + DateTimeInputFormat *string `json:"dateTimeInputFormat,omitempty" tf:"date_time_input_format,omitempty"` -// Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple. -DateTimeOutputFormat *string `json:"dateTimeOutputFormat,omitempty" tf:"date_time_output_format,omitempty"` + // Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple. + DateTimeOutputFormat *string `json:"dateTimeOutputFormat,omitempty" tf:"date_time_output_format,omitempty"` -// Enables or disables the deduplication check for materialized views that receive data from Replicated* tables. -DeduplicateBlocksInDependentMaterializedViews *bool `json:"deduplicateBlocksInDependentMaterializedViews,omitempty" tf:"deduplicate_blocks_in_dependent_materialized_views,omitempty"` + // Enables or disables the deduplication check for materialized views that receive data from Replicated* tables. + DeduplicateBlocksInDependentMaterializedViews *bool `json:"deduplicateBlocksInDependentMaterializedViews,omitempty" tf:"deduplicate_blocks_in_dependent_materialized_views,omitempty"` -// Sets behaviour on overflow when using DISTINCT. Possible values: -DistinctOverflowMode *string `json:"distinctOverflowMode,omitempty" tf:"distinct_overflow_mode,omitempty"` + // Sets behaviour on overflow when using DISTINCT. Possible values: + DistinctOverflowMode *string `json:"distinctOverflowMode,omitempty" tf:"distinct_overflow_mode,omitempty"` -// Determine the behavior of distributed subqueries. 
-DistributedAggregationMemoryEfficient *bool `json:"distributedAggregationMemoryEfficient,omitempty" tf:"distributed_aggregation_memory_efficient,omitempty"` + // Determines the behavior of distributed subqueries. + DistributedAggregationMemoryEfficient *bool `json:"distributedAggregationMemoryEfficient,omitempty" tf:"distributed_aggregation_memory_efficient,omitempty"` -// Timeout for DDL queries, in milliseconds. -DistributedDdlTaskTimeout *float64 `json:"distributedDdlTaskTimeout,omitempty" tf:"distributed_ddl_task_timeout,omitempty"` + // Timeout for DDL queries, in milliseconds. + DistributedDdlTaskTimeout *float64 `json:"distributedDdlTaskTimeout,omitempty" tf:"distributed_ddl_task_timeout,omitempty"` -// Changes the behaviour of distributed subqueries. -DistributedProductMode *string `json:"distributedProductMode,omitempty" tf:"distributed_product_mode,omitempty"` + // Changes the behaviour of distributed subqueries. + DistributedProductMode *string `json:"distributedProductMode,omitempty" tf:"distributed_product_mode,omitempty"` -// Allows to retunr empty result. -EmptyResultForAggregationByEmptySet *bool `json:"emptyResultForAggregationByEmptySet,omitempty" tf:"empty_result_for_aggregation_by_empty_set,omitempty"` + // Allows returning an empty result. + EmptyResultForAggregationByEmptySet *bool `json:"emptyResultForAggregationByEmptySet,omitempty" tf:"empty_result_for_aggregation_by_empty_set,omitempty"` -// Enables or disables data compression in the response to an HTTP request. -EnableHTTPCompression *bool `json:"enableHttpCompression,omitempty" tf:"enable_http_compression,omitempty"` + // Enables or disables data compression in the response to an HTTP request. + EnableHTTPCompression *bool `json:"enableHttpCompression,omitempty" tf:"enable_http_compression,omitempty"` -// Forces a query to an out-of-date replica if updated data is not available. -FallbackToStaleReplicasForDistributedQueries *bool `json:"fallbackToStaleReplicasForDistributedQueries,omitempty" tf:"fallback_to_stale_replicas_for_distributed_queries,omitempty"` + // Forces a query to an out-of-date replica if updated data is not available. + FallbackToStaleReplicasForDistributedQueries *bool `json:"fallbackToStaleReplicasForDistributedQueries,omitempty" tf:"fallback_to_stale_replicas_for_distributed_queries,omitempty"` -// Sets the data format of a nested columns. -FlattenNested *bool `json:"flattenNested,omitempty" tf:"flatten_nested,omitempty"` + // Sets the data format of nested columns. + FlattenNested *bool `json:"flattenNested,omitempty" tf:"flatten_nested,omitempty"` -// Disables query execution if the index can’t be used by date. -ForceIndexByDate *bool `json:"forceIndexByDate,omitempty" tf:"force_index_by_date,omitempty"` + // Disables query execution if the index can’t be used by date. + ForceIndexByDate *bool `json:"forceIndexByDate,omitempty" tf:"force_index_by_date,omitempty"` -// Disables query execution if indexing by the primary key is not possible. -ForcePrimaryKey *bool `json:"forcePrimaryKey,omitempty" tf:"force_primary_key,omitempty"` + // Disables query execution if indexing by the primary key is not possible. + ForcePrimaryKey *bool `json:"forcePrimaryKey,omitempty" tf:"force_primary_key,omitempty"` -// Regular expression (for Regexp format). -FormatRegexp *string `json:"formatRegexp,omitempty" tf:"format_regexp,omitempty"` + // Regular expression (for Regexp format).
+	FormatRegexp *string `json:"formatRegexp,omitempty" tf:"format_regexp,omitempty"`

-// Skip lines unmatched by regular expression.
-FormatRegexpSkipUnmatched *bool `json:"formatRegexpSkipUnmatched,omitempty" tf:"format_regexp_skip_unmatched,omitempty"`
+	// Skip lines unmatched by regular expression.
+	FormatRegexpSkipUnmatched *bool `json:"formatRegexpSkipUnmatched,omitempty" tf:"format_regexp_skip_unmatched,omitempty"`

-// Sets behaviour on overflow while GROUP BY operation. Possible values:
-GroupByOverflowMode *string `json:"groupByOverflowMode,omitempty" tf:"group_by_overflow_mode,omitempty"`
+	// Sets behaviour on overflow during a GROUP BY operation. Possible values:
+	GroupByOverflowMode *string `json:"groupByOverflowMode,omitempty" tf:"group_by_overflow_mode,omitempty"`

-// Sets the threshold of the number of keys, after that the two-level aggregation should be used.
-GroupByTwoLevelThreshold *float64 `json:"groupByTwoLevelThreshold,omitempty" tf:"group_by_two_level_threshold,omitempty"`
+	// Sets the threshold of the number of keys, after which the two-level aggregation should be used.
+	GroupByTwoLevelThreshold *float64 `json:"groupByTwoLevelThreshold,omitempty" tf:"group_by_two_level_threshold,omitempty"`

-// Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
-GroupByTwoLevelThresholdBytes *float64 `json:"groupByTwoLevelThresholdBytes,omitempty" tf:"group_by_two_level_threshold_bytes,omitempty"`
+	// Sets the threshold of the number of bytes, after which the two-level aggregation should be used.
+	GroupByTwoLevelThresholdBytes *float64 `json:"groupByTwoLevelThresholdBytes,omitempty" tf:"group_by_two_level_threshold_bytes,omitempty"`

-// Timeout for HTTP connection in milliseconds.
-HTTPConnectionTimeout *float64 `json:"httpConnectionTimeout,omitempty" tf:"http_connection_timeout,omitempty"`
+	// Timeout for HTTP connection in milliseconds.
+	HTTPConnectionTimeout *float64 `json:"httpConnectionTimeout,omitempty" tf:"http_connection_timeout,omitempty"`

-// Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
-HTTPHeadersProgressInterval *float64 `json:"httpHeadersProgressInterval,omitempty" tf:"http_headers_progress_interval,omitempty"`
+	// Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
+	HTTPHeadersProgressInterval *float64 `json:"httpHeadersProgressInterval,omitempty" tf:"http_headers_progress_interval,omitempty"`

-// Timeout for HTTP connection in milliseconds.
-HTTPReceiveTimeout *float64 `json:"httpReceiveTimeout,omitempty" tf:"http_receive_timeout,omitempty"`
+	// Timeout for HTTP connection in milliseconds.
+	HTTPReceiveTimeout *float64 `json:"httpReceiveTimeout,omitempty" tf:"http_receive_timeout,omitempty"`

-// Timeout for HTTP connection in milliseconds.
-HTTPSendTimeout *float64 `json:"httpSendTimeout,omitempty" tf:"http_send_timeout,omitempty"`
+	// Timeout for HTTP connection in milliseconds.
+	HTTPSendTimeout *float64 `json:"httpSendTimeout,omitempty" tf:"http_send_timeout,omitempty"`

-// Connection timeout for establishing connection with replica for Hedged requests. Default value: 50 milliseconds.
-HedgedConnectionTimeoutMs *float64 `json:"hedgedConnectionTimeoutMs,omitempty" tf:"hedged_connection_timeout_ms,omitempty"`
+	// Connection timeout for establishing connection with replica for Hedged requests. Default value: 50 milliseconds.
+	HedgedConnectionTimeoutMs *float64 `json:"hedgedConnectionTimeoutMs,omitempty" tf:"hedged_connection_timeout_ms,omitempty"`

-// Timeout to close idle TCP connections after specified number of seconds. Default value: 3600 seconds.
-IdleConnectionTimeout *float64 `json:"idleConnectionTimeout,omitempty" tf:"idle_connection_timeout,omitempty"`
+	// Timeout to close idle TCP connections after specified number of seconds. Default value: 3600 seconds.
+	IdleConnectionTimeout *float64 `json:"idleConnectionTimeout,omitempty" tf:"idle_connection_timeout,omitempty"`

-// When performing INSERT queries, replace omitted input column values with default values of the respective columns.
-InputFormatDefaultsForOmittedFields *bool `json:"inputFormatDefaultsForOmittedFields,omitempty" tf:"input_format_defaults_for_omitted_fields,omitempty"`
+	// When performing INSERT queries, replace omitted input column values with default values of the respective columns.
+	InputFormatDefaultsForOmittedFields *bool `json:"inputFormatDefaultsForOmittedFields,omitempty" tf:"input_format_defaults_for_omitted_fields,omitempty"`

-// Enables or disables the insertion of JSON data with nested objects.
-InputFormatImportNestedJSON *bool `json:"inputFormatImportNestedJson,omitempty" tf:"input_format_import_nested_json,omitempty"`
+	// Enables or disables the insertion of JSON data with nested objects.
+	InputFormatImportNestedJSON *bool `json:"inputFormatImportNestedJson,omitempty" tf:"input_format_import_nested_json,omitempty"`

-// Enables or disables the initialization of NULL fields with default values, if data type of these fields is not nullable.
-InputFormatNullAsDefault *bool `json:"inputFormatNullAsDefault,omitempty" tf:"input_format_null_as_default,omitempty"`
+	// Enables or disables the initialization of NULL fields with default values, if data type of these fields is not nullable.
+	InputFormatNullAsDefault *bool `json:"inputFormatNullAsDefault,omitempty" tf:"input_format_null_as_default,omitempty"`

-// Enables or disables order-preserving parallel parsing of data formats. Supported only for TSV, TKSV, CSV and JSONEachRow formats.
-InputFormatParallelParsing *bool `json:"inputFormatParallelParsing,omitempty" tf:"input_format_parallel_parsing,omitempty"`
+	// Enables or disables order-preserving parallel parsing of data formats. Supported only for TSV, TSKV, CSV and JSONEachRow formats.
+	InputFormatParallelParsing *bool `json:"inputFormatParallelParsing,omitempty" tf:"input_format_parallel_parsing,omitempty"`

-// Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
-InputFormatValuesInterpretExpressions *bool `json:"inputFormatValuesInterpretExpressions,omitempty" tf:"input_format_values_interpret_expressions,omitempty"`
+	// Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
+	InputFormatValuesInterpretExpressions *bool `json:"inputFormatValuesInterpretExpressions,omitempty" tf:"input_format_values_interpret_expressions,omitempty"`

-// Enables or disables checking the column order when inserting data.
-InputFormatWithNamesUseHeader *bool `json:"inputFormatWithNamesUseHeader,omitempty" tf:"input_format_with_names_use_header,omitempty"`
+	// Enables or disables checking the column order when inserting data.
+	InputFormatWithNamesUseHeader *bool `json:"inputFormatWithNamesUseHeader,omitempty" tf:"input_format_with_names_use_header,omitempty"`

-// The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries.
-InsertKeeperMaxRetries *float64 `json:"insertKeeperMaxRetries,omitempty" tf:"insert_keeper_max_retries,omitempty"`
+	// The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries.
+	InsertKeeperMaxRetries *float64 `json:"insertKeeperMaxRetries,omitempty" tf:"insert_keeper_max_retries,omitempty"`

-// Enables the insertion of default values instead of NULL into columns with not nullable data type. Default value: true.
-InsertNullAsDefault *bool `json:"insertNullAsDefault,omitempty" tf:"insert_null_as_default,omitempty"`
+	// Enables the insertion of default values instead of NULL into columns with not nullable data type. Default value: true.
+	InsertNullAsDefault *bool `json:"insertNullAsDefault,omitempty" tf:"insert_null_as_default,omitempty"`

-// Enables the quorum writes.
-InsertQuorum *float64 `json:"insertQuorum,omitempty" tf:"insert_quorum,omitempty"`
+	// Enables the quorum writes.
+	InsertQuorum *float64 `json:"insertQuorum,omitempty" tf:"insert_quorum,omitempty"`

-// Enables or disables parallelism for quorum INSERT queries.
-InsertQuorumParallel *bool `json:"insertQuorumParallel,omitempty" tf:"insert_quorum_parallel,omitempty"`
+	// Enables or disables parallelism for quorum INSERT queries.
+	InsertQuorumParallel *bool `json:"insertQuorumParallel,omitempty" tf:"insert_quorum_parallel,omitempty"`

-// Write to a quorum timeout in milliseconds.
-InsertQuorumTimeout *float64 `json:"insertQuorumTimeout,omitempty" tf:"insert_quorum_timeout,omitempty"`
+	// Write to a quorum timeout in milliseconds.
+	InsertQuorumTimeout *float64 `json:"insertQuorumTimeout,omitempty" tf:"insert_quorum_timeout,omitempty"`

-// Specifies which JOIN algorithm is used. Possible values:
-JoinAlgorithm []*string `json:"joinAlgorithm,omitempty" tf:"join_algorithm,omitempty"`
+	// Specifies which JOIN algorithm is used. Possible values:
+	JoinAlgorithm []*string `json:"joinAlgorithm,omitempty" tf:"join_algorithm,omitempty"`

-// Sets behaviour on overflow in JOIN. Possible values:
-JoinOverflowMode *string `json:"joinOverflowMode,omitempty" tf:"join_overflow_mode,omitempty"`
+	// Sets behaviour on overflow in JOIN. Possible values:
+	JoinOverflowMode *string `json:"joinOverflowMode,omitempty" tf:"join_overflow_mode,omitempty"`

-// Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
-JoinUseNulls *bool `json:"joinUseNulls,omitempty" tf:"join_use_nulls,omitempty"`
+	// Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
+	JoinUseNulls *bool `json:"joinUseNulls,omitempty" tf:"join_use_nulls,omitempty"`
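Each field above carries two struct tags: the json tag (camelCase) is the name exposed on the Kubernetes CRD, while the tf tag (snake_case) names the Terraform attribute it maps onto, and the generated conversion code relies on that pairing. As a hedged illustration only, the tags can be read back with the standard reflect package; exampleField below is a hypothetical stand-in, not a type from this file:

package main

import (
	"fmt"
	"reflect"
)

// exampleField mirrors the generated tag pattern; it is a hypothetical
// stand-in for illustration, not a type from this package.
type exampleField struct {
	MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"`
}

func main() {
	f, ok := reflect.TypeOf(exampleField{}).FieldByName("MaxMemoryUsage")
	if !ok {
		panic("field not found")
	}
	// The json tag carries the CRD-facing name; the tf tag carries the
	// Terraform attribute name the field maps to.
	fmt.Println(f.Tag.Get("json")) // maxMemoryUsage,omitempty
	fmt.Println(f.Tag.Get("tf"))   // max_memory_usage,omitempty
}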
-// Require aliases for subselects and table functions in FROM that more than one table is present.
-JoinedSubqueryRequiresAlias *bool `json:"joinedSubqueryRequiresAlias,omitempty" tf:"joined_subquery_requires_alias,omitempty"`
+	// Require aliases for subselects and table functions in FROM when more than one table is present.
+	JoinedSubqueryRequiresAlias *bool `json:"joinedSubqueryRequiresAlias,omitempty" tf:"joined_subquery_requires_alias,omitempty"`

-// Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random.
-LoadBalancing *string `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"`
+	// Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random.
+	LoadBalancing *string `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"`

-// Method of reading data from local filesystem. Possible values:
-LocalFilesystemReadMethod *string `json:"localFilesystemReadMethod,omitempty" tf:"local_filesystem_read_method,omitempty"`
+	// Method of reading data from local filesystem. Possible values:
+	LocalFilesystemReadMethod *string `json:"localFilesystemReadMethod,omitempty" tf:"local_filesystem_read_method,omitempty"`

-// Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true.
-LogQueryThreads *bool `json:"logQueryThreads,omitempty" tf:"log_query_threads,omitempty"`
+	// Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true.
+	LogQueryThreads *bool `json:"logQueryThreads,omitempty" tf:"log_query_threads,omitempty"`

-// Allows or restricts using the LowCardinality data type with the Native format.
-LowCardinalityAllowInNativeFormat *bool `json:"lowCardinalityAllowInNativeFormat,omitempty" tf:"low_cardinality_allow_in_native_format,omitempty"`
+	// Allows or restricts using the LowCardinality data type with the Native format.
+	LowCardinalityAllowInNativeFormat *bool `json:"lowCardinalityAllowInNativeFormat,omitempty" tf:"low_cardinality_allow_in_native_format,omitempty"`

-// Maximum abstract syntax tree depth.
-MaxAstDepth *float64 `json:"maxAstDepth,omitempty" tf:"max_ast_depth,omitempty"`
+	// Maximum abstract syntax tree depth.
+	MaxAstDepth *float64 `json:"maxAstDepth,omitempty" tf:"max_ast_depth,omitempty"`

-// Maximum abstract syntax tree elements.
-MaxAstElements *float64 `json:"maxAstElements,omitempty" tf:"max_ast_elements,omitempty"`
+	// Maximum abstract syntax tree elements.
+	MaxAstElements *float64 `json:"maxAstElements,omitempty" tf:"max_ast_elements,omitempty"`

-// A recommendation for what size of the block (in a count of rows) to load from tables.
-MaxBlockSize *float64 `json:"maxBlockSize,omitempty" tf:"max_block_size,omitempty"`
+	// A recommendation for what size of the block (in a count of rows) to load from tables.
+	MaxBlockSize *float64 `json:"maxBlockSize,omitempty" tf:"max_block_size,omitempty"`

-// Limit in bytes for using memoru for GROUP BY before using swap on disk.
-MaxBytesBeforeExternalGroupBy *float64 `json:"maxBytesBeforeExternalGroupBy,omitempty" tf:"max_bytes_before_external_group_by,omitempty"`
+	// Limit in bytes for using memory for GROUP BY before using swap on disk.
+	MaxBytesBeforeExternalGroupBy *float64 `json:"maxBytesBeforeExternalGroupBy,omitempty" tf:"max_bytes_before_external_group_by,omitempty"`

-// This setting is equivalent of the max_bytes_before_external_group_by setting, except for it is for sort operation (ORDER BY), not aggregation.
-MaxBytesBeforeExternalSort *float64 `json:"maxBytesBeforeExternalSort,omitempty" tf:"max_bytes_before_external_sort,omitempty"`
+	// This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
+	MaxBytesBeforeExternalSort *float64 `json:"maxBytesBeforeExternalSort,omitempty" tf:"max_bytes_before_external_sort,omitempty"`

-// Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
-MaxBytesInDistinct *float64 `json:"maxBytesInDistinct,omitempty" tf:"max_bytes_in_distinct,omitempty"`
+	// Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
+	MaxBytesInDistinct *float64 `json:"maxBytesInDistinct,omitempty" tf:"max_bytes_in_distinct,omitempty"`

-// Limit on maximum size of the hash table for JOIN, in bytes.
-MaxBytesInJoin *float64 `json:"maxBytesInJoin,omitempty" tf:"max_bytes_in_join,omitempty"`
+	// Limit on maximum size of the hash table for JOIN, in bytes.
+	MaxBytesInJoin *float64 `json:"maxBytesInJoin,omitempty" tf:"max_bytes_in_join,omitempty"`

-// Limit on the number of bytes in the set resulting from the execution of the IN section.
-MaxBytesInSet *float64 `json:"maxBytesInSet,omitempty" tf:"max_bytes_in_set,omitempty"`
+	// Limit on the number of bytes in the set resulting from the execution of the IN section.
+	MaxBytesInSet *float64 `json:"maxBytesInSet,omitempty" tf:"max_bytes_in_set,omitempty"`

-// Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
-MaxBytesToRead *float64 `json:"maxBytesToRead,omitempty" tf:"max_bytes_to_read,omitempty"`
+	// Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
+	MaxBytesToRead *float64 `json:"maxBytesToRead,omitempty" tf:"max_bytes_to_read,omitempty"`

-// Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
-MaxBytesToSort *float64 `json:"maxBytesToSort,omitempty" tf:"max_bytes_to_sort,omitempty"`
+	// Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
+	MaxBytesToSort *float64 `json:"maxBytesToSort,omitempty" tf:"max_bytes_to_sort,omitempty"`

-// Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
-MaxBytesToTransfer *float64 `json:"maxBytesToTransfer,omitempty" tf:"max_bytes_to_transfer,omitempty"`
+	// Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
+	MaxBytesToTransfer *float64 `json:"maxBytesToTransfer,omitempty" tf:"max_bytes_to_transfer,omitempty"`

-// Limits the maximum number of columns that can be read from a table in a single query.
-MaxColumnsToRead *float64 `json:"maxColumnsToRead,omitempty" tf:"max_columns_to_read,omitempty"`
+	// Limits the maximum number of columns that can be read from a table in a single query.
+	MaxColumnsToRead *float64 `json:"maxColumnsToRead,omitempty" tf:"max_columns_to_read,omitempty"`

-// The maximum number of concurrent requests per user. Default value: 0 (no limit).
-MaxConcurrentQueriesForUser *float64 `json:"maxConcurrentQueriesForUser,omitempty" tf:"max_concurrent_queries_for_user,omitempty"`
+	// The maximum number of concurrent requests per user. Default value: 0 (no limit).
+	MaxConcurrentQueriesForUser *float64 `json:"maxConcurrentQueriesForUser,omitempty" tf:"max_concurrent_queries_for_user,omitempty"`

-// Limits the maximum query execution time in milliseconds.
-MaxExecutionTime *float64 `json:"maxExecutionTime,omitempty" tf:"max_execution_time,omitempty"`
+	// Limits the maximum query execution time in milliseconds.
+	MaxExecutionTime *float64 `json:"maxExecutionTime,omitempty" tf:"max_execution_time,omitempty"`

-// Maximum abstract syntax tree depth after after expansion of aliases.
-MaxExpandedAstElements *float64 `json:"maxExpandedAstElements,omitempty" tf:"max_expanded_ast_elements,omitempty"`
+	// Maximum abstract syntax tree depth after expansion of aliases.
+	MaxExpandedAstElements *float64 `json:"maxExpandedAstElements,omitempty" tf:"max_expanded_ast_elements,omitempty"`

-// Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier.
-MaxFinalThreads *float64 `json:"maxFinalThreads,omitempty" tf:"max_final_threads,omitempty"`
+	// Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier.
+	MaxFinalThreads *float64 `json:"maxFinalThreads,omitempty" tf:"max_final_threads,omitempty"`

-// Limits the maximum number of HTTP GET redirect hops for URL-engine tables.
-MaxHTTPGetRedirects *float64 `json:"maxHttpGetRedirects,omitempty" tf:"max_http_get_redirects,omitempty"`
+	// Limits the maximum number of HTTP GET redirect hops for URL-engine tables.
+	MaxHTTPGetRedirects *float64 `json:"maxHttpGetRedirects,omitempty" tf:"max_http_get_redirects,omitempty"`

-// The size of blocks (in a count of rows) to form for insertion into a table.
-MaxInsertBlockSize *float64 `json:"maxInsertBlockSize,omitempty" tf:"max_insert_block_size,omitempty"`
+	// The size of blocks (in a count of rows) to form for insertion into a table.
+	MaxInsertBlockSize *float64 `json:"maxInsertBlockSize,omitempty" tf:"max_insert_block_size,omitempty"`

-// The maximum number of threads to execute the INSERT SELECT query. Default value: 0.
-MaxInsertThreads *float64 `json:"maxInsertThreads,omitempty" tf:"max_insert_threads,omitempty"`
+	// The maximum number of threads to execute the INSERT SELECT query. Default value: 0.
+	MaxInsertThreads *float64 `json:"maxInsertThreads,omitempty" tf:"max_insert_threads,omitempty"`

-// Limits the maximum memory usage (in bytes) for processing queries on a single server.
-MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"`
+	// Limits the maximum memory usage (in bytes) for processing queries on a single server.
+	MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"`

-// Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
-MaxMemoryUsageForUser *float64 `json:"maxMemoryUsageForUser,omitempty" tf:"max_memory_usage_for_user,omitempty"`
+	// Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
+	MaxMemoryUsageForUser *float64 `json:"maxMemoryUsageForUser,omitempty" tf:"max_memory_usage_for_user,omitempty"`

-// Limits the speed of the data exchange over the network in bytes per second.
-MaxNetworkBandwidth *float64 `json:"maxNetworkBandwidth,omitempty" tf:"max_network_bandwidth,omitempty"`
+	// Limits the speed of the data exchange over the network in bytes per second.
+	MaxNetworkBandwidth *float64 `json:"maxNetworkBandwidth,omitempty" tf:"max_network_bandwidth,omitempty"`

-// Limits the speed of the data exchange over the network in bytes per second.
-MaxNetworkBandwidthForUser *float64 `json:"maxNetworkBandwidthForUser,omitempty" tf:"max_network_bandwidth_for_user,omitempty"`
+	// Limits the speed of the data exchange over the network in bytes per second.
+	MaxNetworkBandwidthForUser *float64 `json:"maxNetworkBandwidthForUser,omitempty" tf:"max_network_bandwidth_for_user,omitempty"`

-// Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited.
-MaxParserDepth *float64 `json:"maxParserDepth,omitempty" tf:"max_parser_depth,omitempty"`
+	// Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited.
+	MaxParserDepth *float64 `json:"maxParserDepth,omitempty" tf:"max_parser_depth,omitempty"`

-// The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
-MaxQuerySize *float64 `json:"maxQuerySize,omitempty" tf:"max_query_size,omitempty"`
+	// The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
+	MaxQuerySize *float64 `json:"maxQuerySize,omitempty" tf:"max_query_size,omitempty"`

-// The maximum size of the buffer to read from the filesystem.
-MaxReadBufferSize *float64 `json:"maxReadBufferSize,omitempty" tf:"max_read_buffer_size,omitempty"`
+	// The maximum size of the buffer to read from the filesystem.
+	MaxReadBufferSize *float64 `json:"maxReadBufferSize,omitempty" tf:"max_read_buffer_size,omitempty"`

-// Disables lagging replicas for distributed queries.
-MaxReplicaDelayForDistributedQueries *float64 `json:"maxReplicaDelayForDistributedQueries,omitempty" tf:"max_replica_delay_for_distributed_queries,omitempty"`
+	// Disables lagging replicas for distributed queries.
+	MaxReplicaDelayForDistributedQueries *float64 `json:"maxReplicaDelayForDistributedQueries,omitempty" tf:"max_replica_delay_for_distributed_queries,omitempty"`

-// Limits the number of bytes in the result.
-MaxResultBytes *float64 `json:"maxResultBytes,omitempty" tf:"max_result_bytes,omitempty"`
+	// Limits the number of bytes in the result.
+	MaxResultBytes *float64 `json:"maxResultBytes,omitempty" tf:"max_result_bytes,omitempty"`

-// Limits the number of rows in the result.
-MaxResultRows *float64 `json:"maxResultRows,omitempty" tf:"max_result_rows,omitempty"`
+	// Limits the number of rows in the result.
+	MaxResultRows *float64 `json:"maxResultRows,omitempty" tf:"max_result_rows,omitempty"`

-// Limits the maximum number of different rows when using DISTINCT.
-MaxRowsInDistinct *float64 `json:"maxRowsInDistinct,omitempty" tf:"max_rows_in_distinct,omitempty"`
+	// Limits the maximum number of different rows when using DISTINCT.
+	MaxRowsInDistinct *float64 `json:"maxRowsInDistinct,omitempty" tf:"max_rows_in_distinct,omitempty"`

-// Limit on maximum size of the hash table for JOIN, in rows.
-MaxRowsInJoin *float64 `json:"maxRowsInJoin,omitempty" tf:"max_rows_in_join,omitempty"`
+	// Limit on maximum size of the hash table for JOIN, in rows.
+	MaxRowsInJoin *float64 `json:"maxRowsInJoin,omitempty" tf:"max_rows_in_join,omitempty"`

-// Limit on the number of rows in the set resulting from the execution of the IN section.
-MaxRowsInSet *float64 `json:"maxRowsInSet,omitempty" tf:"max_rows_in_set,omitempty"`
+	// Limit on the number of rows in the set resulting from the execution of the IN section.
+	MaxRowsInSet *float64 `json:"maxRowsInSet,omitempty" tf:"max_rows_in_set,omitempty"`

-// Limits the maximum number of unique keys received from aggregation function.
-MaxRowsToGroupBy *float64 `json:"maxRowsToGroupBy,omitempty" tf:"max_rows_to_group_by,omitempty"`
+	// Limits the maximum number of unique keys received from aggregation function.
+	MaxRowsToGroupBy *float64 `json:"maxRowsToGroupBy,omitempty" tf:"max_rows_to_group_by,omitempty"`

-// Limits the maximum number of rows that can be read from a table when running a query.
-MaxRowsToRead *float64 `json:"maxRowsToRead,omitempty" tf:"max_rows_to_read,omitempty"`
+	// Limits the maximum number of rows that can be read from a table when running a query.
+	MaxRowsToRead *float64 `json:"maxRowsToRead,omitempty" tf:"max_rows_to_read,omitempty"`

-// Limits the maximum number of rows that can be read from a table for sorting.
-MaxRowsToSort *float64 `json:"maxRowsToSort,omitempty" tf:"max_rows_to_sort,omitempty"`
+	// Limits the maximum number of rows that can be read from a table for sorting.
+	MaxRowsToSort *float64 `json:"maxRowsToSort,omitempty" tf:"max_rows_to_sort,omitempty"`

-// Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
-MaxRowsToTransfer *float64 `json:"maxRowsToTransfer,omitempty" tf:"max_rows_to_transfer,omitempty"`
+	// Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
+	MaxRowsToTransfer *float64 `json:"maxRowsToTransfer,omitempty" tf:"max_rows_to_transfer,omitempty"`

-// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
-MaxTemporaryColumns *float64 `json:"maxTemporaryColumns,omitempty" tf:"max_temporary_columns,omitempty"`
+	// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
+	MaxTemporaryColumns *float64 `json:"maxTemporaryColumns,omitempty" tf:"max_temporary_columns,omitempty"`

-// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited.
-MaxTemporaryDataOnDiskSizeForQuery *float64 `json:"maxTemporaryDataOnDiskSizeForQuery,omitempty" tf:"max_temporary_data_on_disk_size_for_query,omitempty"`
+	// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited.
+	MaxTemporaryDataOnDiskSizeForQuery *float64 `json:"maxTemporaryDataOnDiskSizeForQuery,omitempty" tf:"max_temporary_data_on_disk_size_for_query,omitempty"`

-// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited.
-MaxTemporaryDataOnDiskSizeForUser *float64 `json:"maxTemporaryDataOnDiskSizeForUser,omitempty" tf:"max_temporary_data_on_disk_size_for_user,omitempty"`
+	// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited.
+	MaxTemporaryDataOnDiskSizeForUser *float64 `json:"maxTemporaryDataOnDiskSizeForUser,omitempty" tf:"max_temporary_data_on_disk_size_for_user,omitempty"`

-// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
-MaxTemporaryNonConstColumns *float64 `json:"maxTemporaryNonConstColumns,omitempty" tf:"max_temporary_non_const_columns,omitempty"`
+	// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
+	MaxTemporaryNonConstColumns *float64 `json:"maxTemporaryNonConstColumns,omitempty" tf:"max_temporary_non_const_columns,omitempty"`

-// The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
-MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"`
+	// The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
+	MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"`

-// It represents soft memory limit in case when hard limit is reached on user level. This value is used to compute overcommit ratio for the query. Zero means skip the query.
-MemoryOvercommitRatioDenominator *float64 `json:"memoryOvercommitRatioDenominator,omitempty" tf:"memory_overcommit_ratio_denominator,omitempty"`
+	// It represents the soft memory limit when the hard limit is reached on the user level. This value is used to compute the overcommit ratio for the query. Zero means skip the query.
+	MemoryOvercommitRatioDenominator *float64 `json:"memoryOvercommitRatioDenominator,omitempty" tf:"memory_overcommit_ratio_denominator,omitempty"`

-// It represents soft memory limit in case when hard limit is reached on global level. This value is used to compute overcommit ratio for the query. Zero means skip the query.
-MemoryOvercommitRatioDenominatorForUser *float64 `json:"memoryOvercommitRatioDenominatorForUser,omitempty" tf:"memory_overcommit_ratio_denominator_for_user,omitempty"`
+	// It represents the soft memory limit when the hard limit is reached on the global level. This value is used to compute the overcommit ratio for the query. Zero means skip the query.
+	MemoryOvercommitRatioDenominatorForUser *float64 `json:"memoryOvercommitRatioDenominatorForUser,omitempty" tf:"memory_overcommit_ratio_denominator_for_user,omitempty"`

-// Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Possible values: from 0 to 1. Default: 0.
-MemoryProfilerSampleProbability *float64 `json:"memoryProfilerSampleProbability,omitempty" tf:"memory_profiler_sample_probability,omitempty"`
+	// Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability applies to every alloc/free regardless of the size of the allocation. Possible values: from 0 to 1. Default: 0.
+	MemoryProfilerSampleProbability *float64 `json:"memoryProfilerSampleProbability,omitempty" tf:"memory_profiler_sample_probability,omitempty"`

-// Memory profiler step (in bytes). If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. Default value: 4194304 (4 MB). Zero means disabled memory profiler.
-MemoryProfilerStep *float64 `json:"memoryProfilerStep,omitempty" tf:"memory_profiler_step,omitempty"`
+	// Memory profiler step (in bytes). If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. Default value: 4194304 (4 MB). Zero means the memory profiler is disabled.
+	MemoryProfilerStep *float64 `json:"memoryProfilerStep,omitempty" tf:"memory_profiler_step,omitempty"`

-// Maximum time thread will wait for memory to be freed in the case of memory overcommit on a user level. If the timeout is reached and memory is not freed, an exception is thrown.
-MemoryUsageOvercommitMaxWaitMicroseconds *float64 `json:"memoryUsageOvercommitMaxWaitMicroseconds,omitempty" tf:"memory_usage_overcommit_max_wait_microseconds,omitempty"`
+	// Maximum time a thread will wait for memory to be freed in the case of memory overcommit on the user level. If the timeout is reached and memory is not freed, an exception is thrown.
+	MemoryUsageOvercommitMaxWaitMicroseconds *float64 `json:"memoryUsageOvercommitMaxWaitMicroseconds,omitempty" tf:"memory_usage_overcommit_max_wait_microseconds,omitempty"`

-// If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
-MergeTreeMaxBytesToUseCache *float64 `json:"mergeTreeMaxBytesToUseCache,omitempty" tf:"merge_tree_max_bytes_to_use_cache,omitempty"`
+	// If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
+	MergeTreeMaxBytesToUseCache *float64 `json:"mergeTreeMaxBytesToUseCache,omitempty" tf:"merge_tree_max_bytes_to_use_cache,omitempty"`

-// If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
-MergeTreeMaxRowsToUseCache *float64 `json:"mergeTreeMaxRowsToUseCache,omitempty" tf:"merge_tree_max_rows_to_use_cache,omitempty"`
+	// If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
+	MergeTreeMaxRowsToUseCache *float64 `json:"mergeTreeMaxRowsToUseCache,omitempty" tf:"merge_tree_max_rows_to_use_cache,omitempty"`

-// If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
-MergeTreeMinBytesForConcurrentRead *float64 `json:"mergeTreeMinBytesForConcurrentRead,omitempty" tf:"merge_tree_min_bytes_for_concurrent_read,omitempty"`
+	// If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
+	MergeTreeMinBytesForConcurrentRead *float64 `json:"mergeTreeMinBytesForConcurrentRead,omitempty" tf:"merge_tree_min_bytes_for_concurrent_read,omitempty"`

-// If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
-MergeTreeMinRowsForConcurrentRead *float64 `json:"mergeTreeMinRowsForConcurrentRead,omitempty" tf:"merge_tree_min_rows_for_concurrent_read,omitempty"`
+	// If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
+	MergeTreeMinRowsForConcurrentRead *float64 `json:"mergeTreeMinRowsForConcurrentRead,omitempty" tf:"merge_tree_min_rows_for_concurrent_read,omitempty"`

-// The minimum data volume required for using direct I/O access to the storage disk.
-MinBytesToUseDirectIo *float64 `json:"minBytesToUseDirectIo,omitempty" tf:"min_bytes_to_use_direct_io,omitempty"`
+	// The minimum data volume required for using direct I/O access to the storage disk.
+	MinBytesToUseDirectIo *float64 `json:"minBytesToUseDirectIo,omitempty" tf:"min_bytes_to_use_direct_io,omitempty"`

-// How many times to potentially use a compiled chunk of code before running compilation.
-MinCountToCompile *float64 `json:"minCountToCompile,omitempty" tf:"min_count_to_compile,omitempty"`
+	// How many times to potentially use a compiled chunk of code before running compilation.
+	MinCountToCompile *float64 `json:"minCountToCompile,omitempty" tf:"min_count_to_compile,omitempty"`

-// A query waits for expression compilation process to complete prior to continuing execution.
-MinCountToCompileExpression *float64 `json:"minCountToCompileExpression,omitempty" tf:"min_count_to_compile_expression,omitempty"`
+	// A query waits for the expression compilation process to complete prior to continuing execution.
+	MinCountToCompileExpression *float64 `json:"minCountToCompileExpression,omitempty" tf:"min_count_to_compile_expression,omitempty"`

-// Minimal execution speed in rows per second.
-MinExecutionSpeed *float64 `json:"minExecutionSpeed,omitempty" tf:"min_execution_speed,omitempty"`
+	// Minimal execution speed in rows per second.
+	MinExecutionSpeed *float64 `json:"minExecutionSpeed,omitempty" tf:"min_execution_speed,omitempty"`

-// Minimal execution speed in bytes per second.
-MinExecutionSpeedBytes *float64 `json:"minExecutionSpeedBytes,omitempty" tf:"min_execution_speed_bytes,omitempty"`
+	// Minimal execution speed in bytes per second.
+	MinExecutionSpeedBytes *float64 `json:"minExecutionSpeedBytes,omitempty" tf:"min_execution_speed_bytes,omitempty"`

-// Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
-MinInsertBlockSizeBytes *float64 `json:"minInsertBlockSizeBytes,omitempty" tf:"min_insert_block_size_bytes,omitempty"`
+	// Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
+	MinInsertBlockSizeBytes *float64 `json:"minInsertBlockSizeBytes,omitempty" tf:"min_insert_block_size_bytes,omitempty"`

-// Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
-MinInsertBlockSizeRows *float64 `json:"minInsertBlockSizeRows,omitempty" tf:"min_insert_block_size_rows,omitempty"`
+	// Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
+	MinInsertBlockSizeRows *float64 `json:"minInsertBlockSizeRows,omitempty" tf:"min_insert_block_size_rows,omitempty"`

-// If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
-OutputFormatJSONQuote64BitIntegers *bool `json:"outputFormatJsonQuote64BitIntegers,omitempty" tf:"output_format_json_quote_64bit_integers,omitempty"`
+	// If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
+	OutputFormatJSONQuote64BitIntegers *bool `json:"outputFormatJsonQuote64BitIntegers,omitempty" tf:"output_format_json_quote_64bit_integers,omitempty"`

-// Enables +nan, -nan, +inf, -inf outputs in JSON output format.
-OutputFormatJSONQuoteDenormals *bool `json:"outputFormatJsonQuoteDenormals,omitempty" tf:"output_format_json_quote_denormals,omitempty"`
+	// Enables +nan, -nan, +inf, -inf outputs in JSON output format.
+	OutputFormatJSONQuoteDenormals *bool `json:"outputFormatJsonQuoteDenormals,omitempty" tf:"output_format_json_quote_denormals,omitempty"`

-// Enables/disables preferable using the localhost replica when processing distributed queries. Default value: true.
-PreferLocalhostReplica *bool `json:"preferLocalhostReplica,omitempty" tf:"prefer_localhost_replica,omitempty"`
+	// Enables/disables preferential use of the localhost replica when processing distributed queries. Default value: true.
+	PreferLocalhostReplica *bool `json:"preferLocalhostReplica,omitempty" tf:"prefer_localhost_replica,omitempty"`

-// Query priority.
-Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+	// Query priority.
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

-// Quota accounting mode.
-QuotaMode *string `json:"quotaMode,omitempty" tf:"quota_mode,omitempty"`
+	// Quota accounting mode.
+	QuotaMode *string `json:"quotaMode,omitempty" tf:"quota_mode,omitempty"`

-// Sets behaviour on overflow while read. Possible values:
-ReadOverflowMode *string `json:"readOverflowMode,omitempty" tf:"read_overflow_mode,omitempty"`
+	// Sets behaviour on overflow while reading. Possible values:
+	ReadOverflowMode *string `json:"readOverflowMode,omitempty" tf:"read_overflow_mode,omitempty"`

-// Restricts permissions for reading data, write data and change settings queries.
-Readonly *float64 `json:"readonly,omitempty" tf:"readonly,omitempty"`
+	// Restricts permissions for read data, write data, and change settings queries.
+	Readonly *float64 `json:"readonly,omitempty" tf:"readonly,omitempty"`

-// Receive timeout in milliseconds on the socket used for communicating with the client.
-ReceiveTimeout *float64 `json:"receiveTimeout,omitempty" tf:"receive_timeout,omitempty"`
+	// Receive timeout in milliseconds on the socket used for communicating with the client.
+	ReceiveTimeout *float64 `json:"receiveTimeout,omitempty" tf:"receive_timeout,omitempty"`

-// Method of reading data from remote filesystem, one of: read, threadpool.
-RemoteFilesystemReadMethod *string `json:"remoteFilesystemReadMethod,omitempty" tf:"remote_filesystem_read_method,omitempty"`
+	// Method of reading data from remote filesystem, one of: read, threadpool.
+	RemoteFilesystemReadMethod *string `json:"remoteFilesystemReadMethod,omitempty" tf:"remote_filesystem_read_method,omitempty"`

-// For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
-ReplicationAlterPartitionsSync *float64 `json:"replicationAlterPartitionsSync,omitempty" tf:"replication_alter_partitions_sync,omitempty"`
+	// For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
+	ReplicationAlterPartitionsSync *float64 `json:"replicationAlterPartitionsSync,omitempty" tf:"replication_alter_partitions_sync,omitempty"`

-// Sets behaviour on overflow in result. Possible values:
-ResultOverflowMode *string `json:"resultOverflowMode,omitempty" tf:"result_overflow_mode,omitempty"`
+	// Sets behaviour on overflow in result. Possible values:
+	ResultOverflowMode *string `json:"resultOverflowMode,omitempty" tf:"result_overflow_mode,omitempty"`

-// Enables or disables sequential consistency for SELECT queries.
-SelectSequentialConsistency *bool `json:"selectSequentialConsistency,omitempty" tf:"select_sequential_consistency,omitempty"`
+	// Enables or disables sequential consistency for SELECT queries.
+	SelectSequentialConsistency *bool `json:"selectSequentialConsistency,omitempty" tf:"select_sequential_consistency,omitempty"`

-// Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
-SendProgressInHTTPHeaders *bool `json:"sendProgressInHttpHeaders,omitempty" tf:"send_progress_in_http_headers,omitempty"`
+	// Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
+	SendProgressInHTTPHeaders *bool `json:"sendProgressInHttpHeaders,omitempty" tf:"send_progress_in_http_headers,omitempty"`

-// Send timeout in milliseconds on the socket used for communicating with the client.
-SendTimeout *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"`
+	// Send timeout in milliseconds on the socket used for communicating with the client.
+	SendTimeout *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"`

-// Sets behaviour on overflow in the set resulting. Possible values:
-SetOverflowMode *string `json:"setOverflowMode,omitempty" tf:"set_overflow_mode,omitempty"`
+	// Sets behaviour on overflow in the resulting set. Possible values:
+	SetOverflowMode *string `json:"setOverflowMode,omitempty" tf:"set_overflow_mode,omitempty"`

-// Enables or disables silently skipping of unavailable shards.
-SkipUnavailableShards *bool `json:"skipUnavailableShards,omitempty" tf:"skip_unavailable_shards,omitempty"`
+	// Enables or disables silently skipping of unavailable shards.
+	SkipUnavailableShards *bool `json:"skipUnavailableShards,omitempty" tf:"skip_unavailable_shards,omitempty"`

-// Sets behaviour on overflow while sort. Possible values:
-SortOverflowMode *string `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"`
+	// Sets behaviour on overflow while sorting. Possible values:
+	SortOverflowMode *string `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"`

-// Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less that specified in min_execution_speed parameter. Must be at least 1000.
-TimeoutBeforeCheckingExecutionSpeed *float64 `json:"timeoutBeforeCheckingExecutionSpeed,omitempty" tf:"timeout_before_checking_execution_speed,omitempty"`
+	// Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less than that specified in the min_execution_speed parameter. Must be at least 1000.
+	TimeoutBeforeCheckingExecutionSpeed *float64 `json:"timeoutBeforeCheckingExecutionSpeed,omitempty" tf:"timeout_before_checking_execution_speed,omitempty"`

-// Sets behaviour on overflow. Possible values:
-TimeoutOverflowMode *string `json:"timeoutOverflowMode,omitempty" tf:"timeout_overflow_mode,omitempty"`
+	// Sets behaviour on overflow. Possible values:
+	TimeoutOverflowMode *string `json:"timeoutOverflowMode,omitempty" tf:"timeout_overflow_mode,omitempty"`

-// Sets behaviour on overflow. Possible values:
-TransferOverflowMode *string `json:"transferOverflowMode,omitempty" tf:"transfer_overflow_mode,omitempty"`
+	// Sets behaviour on overflow. Possible values:
+	TransferOverflowMode *string `json:"transferOverflowMode,omitempty" tf:"transfer_overflow_mode,omitempty"`

-// Enables equality of NULL values for IN operator.
-TransformNullIn *bool `json:"transformNullIn,omitempty" tf:"transform_null_in,omitempty"`
+	// Enables equality of NULL values for the IN operator.
+	TransformNullIn *bool `json:"transformNullIn,omitempty" tf:"transform_null_in,omitempty"`

-// Enables hedged requests logic for remote queries. It allows to establish many connections with different replicas for query. New connection is enabled in case existent connection(s) with replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. Query uses the first connection which send non empty progress packet (or data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. Queries with max_parallel_replicas > 1 are supported. Default value: true.
-UseHedgedRequests *bool `json:"useHedgedRequests,omitempty" tf:"use_hedged_requests,omitempty"`
+	// Enables hedged requests logic for remote queries. It allows establishing many connections with different replicas for a query. A new connection is enabled in case existing connection(s) with replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. The query uses the first connection which sends a non-empty progress packet (or data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. Queries with max_parallel_replicas > 1 are supported. Default value: true.
+	UseHedgedRequests *bool `json:"useHedgedRequests,omitempty" tf:"use_hedged_requests,omitempty"`

-// Whether to use a cache of uncompressed blocks.
-UseUncompressedCache *bool `json:"useUncompressedCache,omitempty" tf:"use_uncompressed_cache,omitempty"`
+	// Whether to use a cache of uncompressed blocks.
+	UseUncompressedCache *bool `json:"useUncompressedCache,omitempty" tf:"use_uncompressed_cache,omitempty"`

-// Enables waiting for processing of asynchronous insertion. If enabled, server returns OK only after the data is inserted.
-WaitForAsyncInsert *bool `json:"waitForAsyncInsert,omitempty" tf:"wait_for_async_insert,omitempty"`
+	// Enables waiting for processing of asynchronous insertion. If enabled, the server returns OK only after the data is inserted.
+	WaitForAsyncInsert *bool `json:"waitForAsyncInsert,omitempty" tf:"wait_for_async_insert,omitempty"`

-// The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second).
-WaitForAsyncInsertTimeout *float64 `json:"waitForAsyncInsertTimeout,omitempty" tf:"wait_for_async_insert_timeout,omitempty"`
+	// The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second).
+	WaitForAsyncInsertTimeout *float64 `json:"waitForAsyncInsertTimeout,omitempty" tf:"wait_for_async_insert_timeout,omitempty"`
}
-
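Every optional setting in both structs is pointer-typed with an omitempty json tag, so a setting that was never specified marshals to nothing at all rather than to a zero value; that is what lets the serialized payload distinguish "unset" from an explicit 0 or false. A minimal, self-contained sketch of that behaviour, using a hypothetical miniSettings type that mirrors two of the fields above (not a type from this package):

package main

import (
	"encoding/json"
	"fmt"
)

// miniSettings is a hypothetical two-field slice of the generated
// struct above; all optional settings are pointers with omitempty.
type miniSettings struct {
	ConnectTimeout *float64 `json:"connectTimeout,omitempty"`
	Readonly       *float64 `json:"readonly,omitempty"`
}

func main() {
	t := 3000.0
	out, err := json.Marshal(miniSettings{ConnectTimeout: &t})
	if err != nil {
		panic(err)
	}
	// Only the field that was set appears; readonly is omitted
	// because its pointer is nil.
	fmt.Println(string(out)) // {"connectTimeout":3000}
}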
type UserSettingsParameters struct {
+	// Include CORS headers in HTTP responses.
+	// +kubebuilder:validation:Optional
+	AddHTTPCorsHeader *bool `json:"addHttpCorsHeader,omitempty" tf:"add_http_cors_header,omitempty"`

-// Include CORS headers in HTTP responces.
-// +kubebuilder:validation:Optional
-AddHTTPCorsHeader *bool `json:"addHttpCorsHeader,omitempty" tf:"add_http_cors_header,omitempty"`
-
-// Allows or denies DDL queries.
-// +kubebuilder:validation:Optional
-AllowDdl *bool `json:"allowDdl,omitempty" tf:"allow_ddl,omitempty"`
+	// Allows or denies DDL queries.
+	// +kubebuilder:validation:Optional
+	AllowDdl *bool `json:"allowDdl,omitempty" tf:"allow_ddl,omitempty"`

-// Enables introspections functions for query profiling.
-// +kubebuilder:validation:Optional
-AllowIntrospectionFunctions *bool `json:"allowIntrospectionFunctions,omitempty" tf:"allow_introspection_functions,omitempty"`
+	// Enables introspection functions for query profiling.
+	// +kubebuilder:validation:Optional
+	AllowIntrospectionFunctions *bool `json:"allowIntrospectionFunctions,omitempty" tf:"allow_introspection_functions,omitempty"`

-// Allows specifying LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption.
-// +kubebuilder:validation:Optional
-AllowSuspiciousLowCardinalityTypes *bool `json:"allowSuspiciousLowCardinalityTypes,omitempty" tf:"allow_suspicious_low_cardinality_types,omitempty"`
+	// Allows specifying LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption.
+	// +kubebuilder:validation:Optional
+	AllowSuspiciousLowCardinalityTypes *bool `json:"allowSuspiciousLowCardinalityTypes,omitempty" tf:"allow_suspicious_low_cardinality_types,omitempty"`

-// enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations.
-// +kubebuilder:validation:Optional
-AnyJoinDistinctRightTableKeys *bool `json:"anyJoinDistinctRightTableKeys,omitempty" tf:"any_join_distinct_right_table_keys,omitempty"`
+	// Enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations.
+	// +kubebuilder:validation:Optional
+	AnyJoinDistinctRightTableKeys *bool `json:"anyJoinDistinctRightTableKeys,omitempty" tf:"any_join_distinct_right_table_keys,omitempty"`

-// Enables asynchronous inserts. Disabled by default.
-// +kubebuilder:validation:Optional
-AsyncInsert *bool `json:"asyncInsert,omitempty" tf:"async_insert,omitempty"`
+	// Enables asynchronous inserts. Disabled by default.
+	// +kubebuilder:validation:Optional
+	AsyncInsert *bool `json:"asyncInsert,omitempty" tf:"async_insert,omitempty"`

-// The maximum timeout in milliseconds since the first INSERT query before inserting collected data. If the parameter is set to 0, the timeout is disabled. Default value: 200.
-// +kubebuilder:validation:Optional
-AsyncInsertBusyTimeout *float64 `json:"asyncInsertBusyTimeout,omitempty" tf:"async_insert_busy_timeout,omitempty"`
+	// The maximum timeout in milliseconds since the first INSERT query before inserting collected data. If the parameter is set to 0, the timeout is disabled. Default value: 200.
+	// +kubebuilder:validation:Optional
+	AsyncInsertBusyTimeout *float64 `json:"asyncInsertBusyTimeout,omitempty" tf:"async_insert_busy_timeout,omitempty"`
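Unlike the observation struct, every field in UserSettingsParameters additionally carries a +kubebuilder:validation:Optional marker; controller-gen reads these comment markers when generating the CRD's OpenAPI schema, so the field is not listed as required. A hedged sketch of the convention only, with a hypothetical type that is not part of this file:

package v1alpha1example

// SpecExample illustrates marker placement: the marker comment sits
// directly above the field it applies to, inside the doc comment block.
type SpecExample struct {
	// Enables the hypothetical feature.
	// +kubebuilder:validation:Optional
	Feature *bool `json:"feature,omitempty" tf:"feature,omitempty"`
}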
-// The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000.
-// +kubebuilder:validation:Optional
-AsyncInsertMaxDataSize *float64 `json:"asyncInsertMaxDataSize,omitempty" tf:"async_insert_max_data_size,omitempty"`
+	// The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000.
+	// +kubebuilder:validation:Optional
+	AsyncInsertMaxDataSize *float64 `json:"asyncInsertMaxDataSize,omitempty" tf:"async_insert_max_data_size,omitempty"`

-// The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the settings prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded.
-// +kubebuilder:validation:Optional
-AsyncInsertStaleTimeout *float64 `json:"asyncInsertStaleTimeout,omitempty" tf:"async_insert_stale_timeout,omitempty"`
+	// The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the setting prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded.
+	// +kubebuilder:validation:Optional
+	AsyncInsertStaleTimeout *float64 `json:"asyncInsertStaleTimeout,omitempty" tf:"async_insert_stale_timeout,omitempty"`

-// The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16.
-// +kubebuilder:validation:Optional
-AsyncInsertThreads *float64 `json:"asyncInsertThreads,omitempty" tf:"async_insert_threads,omitempty"`
+	// The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16.
+	// +kubebuilder:validation:Optional
+	AsyncInsertThreads *float64 `json:"asyncInsertThreads,omitempty" tf:"async_insert_threads,omitempty"`

-// Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false.
-// +kubebuilder:validation:Optional
-CancelHTTPReadonlyQueriesOnClientClose *bool `json:"cancelHttpReadonlyQueriesOnClientClose,omitempty" tf:"cancel_http_readonly_queries_on_client_close,omitempty"`
+	// Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false.
+	// +kubebuilder:validation:Optional
+	CancelHTTPReadonlyQueriesOnClientClose *bool `json:"cancelHttpReadonlyQueriesOnClientClose,omitempty" tf:"cancel_http_readonly_queries_on_client_close,omitempty"`

-// Enable compilation of queries.
-// +kubebuilder:validation:Optional
-Compile *bool `json:"compile,omitempty" tf:"compile,omitempty"`
+	// Enable compilation of queries.
+	// +kubebuilder:validation:Optional
+	Compile *bool `json:"compile,omitempty" tf:"compile,omitempty"`

-// Turn on expression compilation.
-// +kubebuilder:validation:Optional
-CompileExpressions *bool `json:"compileExpressions,omitempty" tf:"compile_expressions,omitempty"`
+	// Turn on expression compilation.
+	// +kubebuilder:validation:Optional
+	CompileExpressions *bool `json:"compileExpressions,omitempty" tf:"compile_expressions,omitempty"`

-// Connect timeout in milliseconds on the socket used for communicating with the client.
-// +kubebuilder:validation:Optional
-ConnectTimeout *float64 `json:"connectTimeout,omitempty" tf:"connect_timeout,omitempty"`
+	// Connect timeout in milliseconds on the socket used for communicating with the client.
+	// +kubebuilder:validation:Optional
+	ConnectTimeout *float64 `json:"connectTimeout,omitempty" tf:"connect_timeout,omitempty"`

-// The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50.
-// +kubebuilder:validation:Optional
-ConnectTimeoutWithFailover *float64 `json:"connectTimeoutWithFailover,omitempty" tf:"connect_timeout_with_failover,omitempty"`
+	// The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50.
+	// +kubebuilder:validation:Optional
+	ConnectTimeoutWithFailover *float64 `json:"connectTimeoutWithFailover,omitempty" tf:"connect_timeout_with_failover,omitempty"`

-// Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
-// +kubebuilder:validation:Optional
-CountDistinctImplementation *string `json:"countDistinctImplementation,omitempty" tf:"count_distinct_implementation,omitempty"`
+	// Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
+	// +kubebuilder:validation:Optional
+	CountDistinctImplementation *string `json:"countDistinctImplementation,omitempty" tf:"count_distinct_implementation,omitempty"`

-// Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort.
-// +kubebuilder:validation:Optional
-DateTimeInputFormat *string `json:"dateTimeInputFormat,omitempty" tf:"date_time_input_format,omitempty"`
+	// Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort.
+	// +kubebuilder:validation:Optional
+	DateTimeInputFormat *string `json:"dateTimeInputFormat,omitempty" tf:"date_time_input_format,omitempty"`

-// Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple.
-// +kubebuilder:validation:Optional
-DateTimeOutputFormat *string `json:"dateTimeOutputFormat,omitempty" tf:"date_time_output_format,omitempty"`
+	// Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple.
+	// +kubebuilder:validation:Optional
+	DateTimeOutputFormat *string `json:"dateTimeOutputFormat,omitempty" tf:"date_time_output_format,omitempty"`

-// Enables or disables the deduplication check for materialized views that receive data from Replicated* tables.
-// +kubebuilder:validation:Optional
-DeduplicateBlocksInDependentMaterializedViews *bool `json:"deduplicateBlocksInDependentMaterializedViews,omitempty" tf:"deduplicate_blocks_in_dependent_materialized_views,omitempty"`
+	// Enables or disables the deduplication check for materialized views that receive data from Replicated* tables.
+	// +kubebuilder:validation:Optional
+	DeduplicateBlocksInDependentMaterializedViews *bool `json:"deduplicateBlocksInDependentMaterializedViews,omitempty" tf:"deduplicate_blocks_in_dependent_materialized_views,omitempty"`

-// Sets behaviour on overflow when using DISTINCT. Possible values:
-// +kubebuilder:validation:Optional
-DistinctOverflowMode *string `json:"distinctOverflowMode,omitempty" tf:"distinct_overflow_mode,omitempty"`
+	// Sets behaviour on overflow when using DISTINCT. Possible values:
+	// +kubebuilder:validation:Optional
+	DistinctOverflowMode *string `json:"distinctOverflowMode,omitempty" tf:"distinct_overflow_mode,omitempty"`

-// Determine the behavior of distributed subqueries.
-// +kubebuilder:validation:Optional
-DistributedAggregationMemoryEfficient *bool `json:"distributedAggregationMemoryEfficient,omitempty" tf:"distributed_aggregation_memory_efficient,omitempty"`
+	// Determine the behavior of distributed subqueries.
+	// +kubebuilder:validation:Optional
+	DistributedAggregationMemoryEfficient *bool `json:"distributedAggregationMemoryEfficient,omitempty" tf:"distributed_aggregation_memory_efficient,omitempty"`

-// Timeout for DDL queries, in milliseconds.
-// +kubebuilder:validation:Optional
-DistributedDdlTaskTimeout *float64 `json:"distributedDdlTaskTimeout,omitempty" tf:"distributed_ddl_task_timeout,omitempty"`
+	// Timeout for DDL queries, in milliseconds.
+	// +kubebuilder:validation:Optional
+	DistributedDdlTaskTimeout *float64 `json:"distributedDdlTaskTimeout,omitempty" tf:"distributed_ddl_task_timeout,omitempty"`

-// Changes the behaviour of distributed subqueries.
-// +kubebuilder:validation:Optional
-DistributedProductMode *string `json:"distributedProductMode,omitempty" tf:"distributed_product_mode,omitempty"`
+	// Changes the behaviour of distributed subqueries.
+	// +kubebuilder:validation:Optional
+	DistributedProductMode *string `json:"distributedProductMode,omitempty" tf:"distributed_product_mode,omitempty"`

-// Allows to retunr empty result.
-// +kubebuilder:validation:Optional
-EmptyResultForAggregationByEmptySet *bool `json:"emptyResultForAggregationByEmptySet,omitempty" tf:"empty_result_for_aggregation_by_empty_set,omitempty"`
+	// Allows returning an empty result.
+	// +kubebuilder:validation:Optional
+	EmptyResultForAggregationByEmptySet *bool `json:"emptyResultForAggregationByEmptySet,omitempty" tf:"empty_result_for_aggregation_by_empty_set,omitempty"`

-// Enables or disables data compression in the response to an HTTP request.
-// +kubebuilder:validation:Optional
-EnableHTTPCompression *bool `json:"enableHttpCompression,omitempty" tf:"enable_http_compression,omitempty"`
+	// Enables or disables data compression in the response to an HTTP request.
+	// +kubebuilder:validation:Optional
+	EnableHTTPCompression *bool `json:"enableHttpCompression,omitempty" tf:"enable_http_compression,omitempty"`

-// Forces a query to an out-of-date replica if updated data is not available.
-// +kubebuilder:validation:Optional
-FallbackToStaleReplicasForDistributedQueries *bool `json:"fallbackToStaleReplicasForDistributedQueries,omitempty" tf:"fallback_to_stale_replicas_for_distributed_queries,omitempty"`
+	// Forces a query to an out-of-date replica if updated data is not available.
+	// +kubebuilder:validation:Optional
+	FallbackToStaleReplicasForDistributedQueries *bool `json:"fallbackToStaleReplicasForDistributedQueries,omitempty" tf:"fallback_to_stale_replicas_for_distributed_queries,omitempty"`

-// Sets the data format of a nested columns.
-// +kubebuilder:validation:Optional
-FlattenNested *bool `json:"flattenNested,omitempty" tf:"flatten_nested,omitempty"`
+	// Sets the data format of nested columns.
+	// +kubebuilder:validation:Optional
+	FlattenNested *bool `json:"flattenNested,omitempty" tf:"flatten_nested,omitempty"`

-// Disables query execution if the index can’t be used by date.
-// +kubebuilder:validation:Optional
-ForceIndexByDate *bool `json:"forceIndexByDate,omitempty" tf:"force_index_by_date,omitempty"`
+	// Disables query execution if the index can’t be used by date.
+	// +kubebuilder:validation:Optional
+	ForceIndexByDate *bool `json:"forceIndexByDate,omitempty" tf:"force_index_by_date,omitempty"`

-// Disables query execution if indexing by the primary key is not possible.
-// +kubebuilder:validation:Optional
-ForcePrimaryKey *bool `json:"forcePrimaryKey,omitempty" tf:"force_primary_key,omitempty"`
+	// Disables query execution if indexing by the primary key is not possible.
+	// +kubebuilder:validation:Optional
+	ForcePrimaryKey *bool `json:"forcePrimaryKey,omitempty" tf:"force_primary_key,omitempty"`

-// Regular expression (for Regexp format).
-// +kubebuilder:validation:Optional
-FormatRegexp *string `json:"formatRegexp,omitempty" tf:"format_regexp,omitempty"`
+	// Regular expression (for Regexp format).
+	// +kubebuilder:validation:Optional
+	FormatRegexp *string `json:"formatRegexp,omitempty" tf:"format_regexp,omitempty"`

-// Skip lines unmatched by regular expression.
-// +kubebuilder:validation:Optional
-FormatRegexpSkipUnmatched *bool `json:"formatRegexpSkipUnmatched,omitempty" tf:"format_regexp_skip_unmatched,omitempty"`
+	// Skip lines unmatched by regular expression.
+	// +kubebuilder:validation:Optional
+	FormatRegexpSkipUnmatched *bool `json:"formatRegexpSkipUnmatched,omitempty" tf:"format_regexp_skip_unmatched,omitempty"`

-// Sets behaviour on overflow while GROUP BY operation. Possible values:
-// +kubebuilder:validation:Optional
-GroupByOverflowMode *string `json:"groupByOverflowMode,omitempty" tf:"group_by_overflow_mode,omitempty"`
+	// Sets behaviour on overflow during a GROUP BY operation. Possible values:
+	// +kubebuilder:validation:Optional
+	GroupByOverflowMode *string `json:"groupByOverflowMode,omitempty" tf:"group_by_overflow_mode,omitempty"`

-// Sets the threshold of the number of keys, after that the two-level aggregation should be used.
-// +kubebuilder:validation:Optional
-GroupByTwoLevelThreshold *float64 `json:"groupByTwoLevelThreshold,omitempty" tf:"group_by_two_level_threshold,omitempty"`
+	// Sets the threshold of the number of keys, after which two-level aggregation should be used.
+	// +kubebuilder:validation:Optional
+	GroupByTwoLevelThreshold *float64 `json:"groupByTwoLevelThreshold,omitempty" tf:"group_by_two_level_threshold,omitempty"`

-// Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
-// +kubebuilder:validation:Optional
-GroupByTwoLevelThresholdBytes *float64 `json:"groupByTwoLevelThresholdBytes,omitempty" tf:"group_by_two_level_threshold_bytes,omitempty"`
+	// Sets the threshold of the number of bytes, after which two-level aggregation should be used.
+	// +kubebuilder:validation:Optional
+	GroupByTwoLevelThresholdBytes *float64 `json:"groupByTwoLevelThresholdBytes,omitempty" tf:"group_by_two_level_threshold_bytes,omitempty"`

-// Timeout for HTTP connection in milliseconds.
-// +kubebuilder:validation:Optional
-HTTPConnectionTimeout *float64 `json:"httpConnectionTimeout,omitempty" tf:"http_connection_timeout,omitempty"`
+	// Timeout for HTTP connection in milliseconds.
+	// +kubebuilder:validation:Optional
+	HTTPConnectionTimeout *float64 `json:"httpConnectionTimeout,omitempty" tf:"http_connection_timeout,omitempty"`

-// Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
-// +kubebuilder:validation:Optional
-HTTPHeadersProgressInterval *float64 `json:"httpHeadersProgressInterval,omitempty" tf:"http_headers_progress_interval,omitempty"`
+	// Sets the minimal interval between notifications about request progress in the HTTP header X-ClickHouse-Progress.
+	// +kubebuilder:validation:Optional
+	HTTPHeadersProgressInterval *float64 `json:"httpHeadersProgressInterval,omitempty" tf:"http_headers_progress_interval,omitempty"`

-// Timeout for HTTP connection in milliseconds.
-// +kubebuilder:validation:Optional
-HTTPReceiveTimeout *float64 `json:"httpReceiveTimeout,omitempty" tf:"http_receive_timeout,omitempty"`
+	// Receive timeout for HTTP connection in milliseconds.
+	// +kubebuilder:validation:Optional
+	HTTPReceiveTimeout *float64 `json:"httpReceiveTimeout,omitempty" tf:"http_receive_timeout,omitempty"`

-// Timeout for HTTP connection in milliseconds.
-// +kubebuilder:validation:Optional
-HTTPSendTimeout *float64 `json:"httpSendTimeout,omitempty" tf:"http_send_timeout,omitempty"`
+	// Send timeout for HTTP connection in milliseconds.
+	// +kubebuilder:validation:Optional
+	HTTPSendTimeout *float64 `json:"httpSendTimeout,omitempty" tf:"http_send_timeout,omitempty"`

-// Connection timeout for establishing connection with replica for Hedged requests. Default value: 50 milliseconds.
-// +kubebuilder:validation:Optional
-HedgedConnectionTimeoutMs *float64 `json:"hedgedConnectionTimeoutMs,omitempty" tf:"hedged_connection_timeout_ms,omitempty"`
+	// Connection timeout for establishing a connection with a replica for hedged requests. Default value: 50 milliseconds.
+	// +kubebuilder:validation:Optional
+	HedgedConnectionTimeoutMs *float64 `json:"hedgedConnectionTimeoutMs,omitempty" tf:"hedged_connection_timeout_ms,omitempty"`

-// Timeout to close idle TCP connections after specified number of seconds. Default value: 3600 seconds.
-// +kubebuilder:validation:Optional
-IdleConnectionTimeout *float64 `json:"idleConnectionTimeout,omitempty" tf:"idle_connection_timeout,omitempty"`
+	// Timeout to close idle TCP connections after the specified number of seconds. Default value: 3600 seconds.
+	// +kubebuilder:validation:Optional
+	IdleConnectionTimeout *float64 `json:"idleConnectionTimeout,omitempty" tf:"idle_connection_timeout,omitempty"`

-// When performing INSERT queries, replace omitted input column values with default values of the respective columns.
-// +kubebuilder:validation:Optional
-InputFormatDefaultsForOmittedFields *bool `json:"inputFormatDefaultsForOmittedFields,omitempty" tf:"input_format_defaults_for_omitted_fields,omitempty"`
+	// When performing INSERT queries, replace omitted input column values with default values of the respective columns.
+	// +kubebuilder:validation:Optional
+	InputFormatDefaultsForOmittedFields *bool `json:"inputFormatDefaultsForOmittedFields,omitempty" tf:"input_format_defaults_for_omitted_fields,omitempty"`

-// Enables or disables the insertion of JSON data with nested objects.
-// +kubebuilder:validation:Optional
-InputFormatImportNestedJSON *bool `json:"inputFormatImportNestedJson,omitempty" tf:"input_format_import_nested_json,omitempty"`
+	// Enables or disables the insertion of JSON data with nested objects.
+	// +kubebuilder:validation:Optional
+	InputFormatImportNestedJSON *bool `json:"inputFormatImportNestedJson,omitempty" tf:"input_format_import_nested_json,omitempty"`

-// Enables or disables the initialization of NULL fields with default values, if data type of these fields is not nullable.
-// +kubebuilder:validation:Optional
-InputFormatNullAsDefault *bool `json:"inputFormatNullAsDefault,omitempty" tf:"input_format_null_as_default,omitempty"`
+	// Enables or disables the initialization of NULL fields with default values, if the data type of these fields is not nullable.
+	// +kubebuilder:validation:Optional
+	InputFormatNullAsDefault *bool `json:"inputFormatNullAsDefault,omitempty" tf:"input_format_null_as_default,omitempty"`

-// Enables or disables order-preserving parallel parsing of data formats. Supported only for TSV, TKSV, CSV and JSONEachRow formats.
-// +kubebuilder:validation:Optional
-InputFormatParallelParsing *bool `json:"inputFormatParallelParsing,omitempty" tf:"input_format_parallel_parsing,omitempty"`
+	// Enables or disables order-preserving parallel parsing of data formats. Supported only for TSV, TSKV, CSV and JSONEachRow formats.
+	// +kubebuilder:validation:Optional
+	InputFormatParallelParsing *bool `json:"inputFormatParallelParsing,omitempty" tf:"input_format_parallel_parsing,omitempty"`

-// Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
-// +kubebuilder:validation:Optional
-InputFormatValuesInterpretExpressions *bool `json:"inputFormatValuesInterpretExpressions,omitempty" tf:"input_format_values_interpret_expressions,omitempty"`
+	// Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
+	// +kubebuilder:validation:Optional
+	InputFormatValuesInterpretExpressions *bool `json:"inputFormatValuesInterpretExpressions,omitempty" tf:"input_format_values_interpret_expressions,omitempty"`

-// Enables or disables checking the column order when inserting data.
-// +kubebuilder:validation:Optional
-InputFormatWithNamesUseHeader *bool `json:"inputFormatWithNamesUseHeader,omitempty" tf:"input_format_with_names_use_header,omitempty"`
+	// Enables or disables checking the column order when inserting data.
+	// +kubebuilder:validation:Optional
+	InputFormatWithNamesUseHeader *bool `json:"inputFormatWithNamesUseHeader,omitempty" tf:"input_format_with_names_use_header,omitempty"`

-// The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries.
-// +kubebuilder:validation:Optional
-InsertKeeperMaxRetries *float64 `json:"insertKeeperMaxRetries,omitempty" tf:"insert_keeper_max_retries,omitempty"`
+	// Sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries.
+	// +kubebuilder:validation:Optional
+	InsertKeeperMaxRetries *float64 `json:"insertKeeperMaxRetries,omitempty" tf:"insert_keeper_max_retries,omitempty"`

-// Enables the insertion of default values instead of NULL into columns with not nullable data type. Default value: true.
-// +kubebuilder:validation:Optional
-InsertNullAsDefault *bool `json:"insertNullAsDefault,omitempty" tf:"insert_null_as_default,omitempty"`
+	// Enables the insertion of default values instead of NULL into columns with a non-nullable data type. Default value: true.
+	// +kubebuilder:validation:Optional
+	InsertNullAsDefault *bool `json:"insertNullAsDefault,omitempty" tf:"insert_null_as_default,omitempty"`

-// Enables the quorum writes.
-// +kubebuilder:validation:Optional
-InsertQuorum *float64 `json:"insertQuorum,omitempty" tf:"insert_quorum,omitempty"`
+	// Enables quorum writes.
+	// +kubebuilder:validation:Optional
+	InsertQuorum *float64 `json:"insertQuorum,omitempty" tf:"insert_quorum,omitempty"`

-// Enables or disables parallelism for quorum INSERT queries.
-// +kubebuilder:validation:Optional
-InsertQuorumParallel *bool `json:"insertQuorumParallel,omitempty" tf:"insert_quorum_parallel,omitempty"`
+	// Enables or disables parallelism for quorum INSERT queries.
+	// +kubebuilder:validation:Optional
+	InsertQuorumParallel *bool `json:"insertQuorumParallel,omitempty" tf:"insert_quorum_parallel,omitempty"`

-// Write to a quorum timeout in milliseconds.
-// +kubebuilder:validation:Optional
-InsertQuorumTimeout *float64 `json:"insertQuorumTimeout,omitempty" tf:"insert_quorum_timeout,omitempty"`
+	// Timeout for writing to a quorum, in milliseconds.
+	// +kubebuilder:validation:Optional
+	InsertQuorumTimeout *float64 `json:"insertQuorumTimeout,omitempty" tf:"insert_quorum_timeout,omitempty"`

-// Specifies which JOIN algorithm is used. Possible values:
-// +kubebuilder:validation:Optional
-JoinAlgorithm []*string `json:"joinAlgorithm,omitempty" tf:"join_algorithm,omitempty"`
+	// Specifies which JOIN algorithm is used. Possible values:
+	// +kubebuilder:validation:Optional
+	JoinAlgorithm []*string `json:"joinAlgorithm,omitempty" tf:"join_algorithm,omitempty"`

-// Sets behaviour on overflow in JOIN. Possible values:
-// +kubebuilder:validation:Optional
-JoinOverflowMode *string `json:"joinOverflowMode,omitempty" tf:"join_overflow_mode,omitempty"`
+	// Sets behaviour on overflow in JOIN. Possible values:
+	// +kubebuilder:validation:Optional
+	JoinOverflowMode *string `json:"joinOverflowMode,omitempty" tf:"join_overflow_mode,omitempty"`

-// Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
-// +kubebuilder:validation:Optional
-JoinUseNulls *bool `json:"joinUseNulls,omitempty" tf:"join_use_nulls,omitempty"`
+	// Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
+	// +kubebuilder:validation:Optional
+	JoinUseNulls *bool `json:"joinUseNulls,omitempty" tf:"join_use_nulls,omitempty"`

-// Require aliases for subselects and table functions in FROM that more than one table is present.
-// +kubebuilder:validation:Optional
-JoinedSubqueryRequiresAlias *bool `json:"joinedSubqueryRequiresAlias,omitempty" tf:"joined_subquery_requires_alias,omitempty"`
+	// Require aliases for subselects and table functions in FROM when more than one table is present.
+	// +kubebuilder:validation:Optional
+	JoinedSubqueryRequiresAlias *bool `json:"joinedSubqueryRequiresAlias,omitempty" tf:"joined_subquery_requires_alias,omitempty"`

-// Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random.
-// +kubebuilder:validation:Optional
-LoadBalancing *string `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"`
+	// Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random.
+	// +kubebuilder:validation:Optional
+	LoadBalancing *string `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"`

-// Method of reading data from local filesystem. Possible values:
-// +kubebuilder:validation:Optional
-LocalFilesystemReadMethod *string `json:"localFilesystemReadMethod,omitempty" tf:"local_filesystem_read_method,omitempty"`
+	// Method of reading data from local filesystem. Possible values:
+	// +kubebuilder:validation:Optional
+	LocalFilesystemReadMethod *string `json:"localFilesystemReadMethod,omitempty" tf:"local_filesystem_read_method,omitempty"`

-// Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true.
-// +kubebuilder:validation:Optional
-LogQueryThreads *bool `json:"logQueryThreads,omitempty" tf:"log_query_threads,omitempty"`
+	// Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true.
+	// +kubebuilder:validation:Optional
+	LogQueryThreads *bool `json:"logQueryThreads,omitempty" tf:"log_query_threads,omitempty"`

-// Allows or restricts using the LowCardinality data type with the Native format.
-// +kubebuilder:validation:Optional
-LowCardinalityAllowInNativeFormat *bool `json:"lowCardinalityAllowInNativeFormat,omitempty" tf:"low_cardinality_allow_in_native_format,omitempty"`
+	// Allows or restricts using the LowCardinality data type with the Native format.
+	// +kubebuilder:validation:Optional
+	LowCardinalityAllowInNativeFormat *bool `json:"lowCardinalityAllowInNativeFormat,omitempty" tf:"low_cardinality_allow_in_native_format,omitempty"`

-// Maximum abstract syntax tree depth.
-// +kubebuilder:validation:Optional
-MaxAstDepth *float64 `json:"maxAstDepth,omitempty" tf:"max_ast_depth,omitempty"`
+	// Maximum abstract syntax tree depth.
+	// +kubebuilder:validation:Optional
+	MaxAstDepth *float64 `json:"maxAstDepth,omitempty" tf:"max_ast_depth,omitempty"`

-// Maximum abstract syntax tree elements.
-// +kubebuilder:validation:Optional
-MaxAstElements *float64 `json:"maxAstElements,omitempty" tf:"max_ast_elements,omitempty"`
+	// Maximum abstract syntax tree elements.
+	// +kubebuilder:validation:Optional
+	MaxAstElements *float64 `json:"maxAstElements,omitempty" tf:"max_ast_elements,omitempty"`

-// A recommendation for what size of the block (in a count of rows) to load from tables.
-// +kubebuilder:validation:Optional
-MaxBlockSize *float64 `json:"maxBlockSize,omitempty" tf:"max_block_size,omitempty"`
+	// A recommendation for what size of the block (in a count of rows) to load from tables.
+	// +kubebuilder:validation:Optional
+	MaxBlockSize *float64 `json:"maxBlockSize,omitempty" tf:"max_block_size,omitempty"`

-// Limit in bytes for using memoru for GROUP BY before using swap on disk.
-// +kubebuilder:validation:Optional
-MaxBytesBeforeExternalGroupBy *float64 `json:"maxBytesBeforeExternalGroupBy,omitempty" tf:"max_bytes_before_external_group_by,omitempty"`
+	// Limit in bytes for using memory for GROUP BY before using swap on disk.
+	// +kubebuilder:validation:Optional
+	MaxBytesBeforeExternalGroupBy *float64 `json:"maxBytesBeforeExternalGroupBy,omitempty" tf:"max_bytes_before_external_group_by,omitempty"`

-// This setting is equivalent of the max_bytes_before_external_group_by setting, except for it is for sort operation (ORDER BY), not aggregation.
-// +kubebuilder:validation:Optional
-MaxBytesBeforeExternalSort *float64 `json:"maxBytesBeforeExternalSort,omitempty" tf:"max_bytes_before_external_sort,omitempty"`
+	// This setting is equivalent to the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
+	// +kubebuilder:validation:Optional
+	MaxBytesBeforeExternalSort *float64 `json:"maxBytesBeforeExternalSort,omitempty" tf:"max_bytes_before_external_sort,omitempty"`

-// Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
-// +kubebuilder:validation:Optional
-MaxBytesInDistinct *float64 `json:"maxBytesInDistinct,omitempty" tf:"max_bytes_in_distinct,omitempty"`
+	// Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
+	// +kubebuilder:validation:Optional
+	MaxBytesInDistinct *float64 `json:"maxBytesInDistinct,omitempty" tf:"max_bytes_in_distinct,omitempty"`

-// Limit on maximum size of the hash table for JOIN, in bytes.
-// +kubebuilder:validation:Optional
-MaxBytesInJoin *float64 `json:"maxBytesInJoin,omitempty" tf:"max_bytes_in_join,omitempty"`
+	// Limit on maximum size of the hash table for JOIN, in bytes.
+	// +kubebuilder:validation:Optional
+	MaxBytesInJoin *float64 `json:"maxBytesInJoin,omitempty" tf:"max_bytes_in_join,omitempty"`

-// Limit on the number of bytes in the set resulting from the execution of the IN section.
-// +kubebuilder:validation:Optional
-MaxBytesInSet *float64 `json:"maxBytesInSet,omitempty" tf:"max_bytes_in_set,omitempty"`
+	// Limit on the number of bytes in the set resulting from the execution of the IN section.
+	// +kubebuilder:validation:Optional
+	MaxBytesInSet *float64 `json:"maxBytesInSet,omitempty" tf:"max_bytes_in_set,omitempty"`

-// Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
-// +kubebuilder:validation:Optional
-MaxBytesToRead *float64 `json:"maxBytesToRead,omitempty" tf:"max_bytes_to_read,omitempty"`
+	// Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
+	// +kubebuilder:validation:Optional
+	MaxBytesToRead *float64 `json:"maxBytesToRead,omitempty" tf:"max_bytes_to_read,omitempty"`

-// Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
-// +kubebuilder:validation:Optional
-MaxBytesToSort *float64 `json:"maxBytesToSort,omitempty" tf:"max_bytes_to_sort,omitempty"`
+	// Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
+	// +kubebuilder:validation:Optional
+	MaxBytesToSort *float64 `json:"maxBytesToSort,omitempty" tf:"max_bytes_to_sort,omitempty"`

-// Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
-// +kubebuilder:validation:Optional
-MaxBytesToTransfer *float64 `json:"maxBytesToTransfer,omitempty" tf:"max_bytes_to_transfer,omitempty"`
+	// Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
+	// +kubebuilder:validation:Optional
+	MaxBytesToTransfer *float64 `json:"maxBytesToTransfer,omitempty" tf:"max_bytes_to_transfer,omitempty"`

-// Limits the maximum number of columns that can be read from a table in a single query.
-// +kubebuilder:validation:Optional
-MaxColumnsToRead *float64 `json:"maxColumnsToRead,omitempty" tf:"max_columns_to_read,omitempty"`
+	// Limits the maximum number of columns that can be read from a table in a single query.
+	// +kubebuilder:validation:Optional
+	MaxColumnsToRead *float64 `json:"maxColumnsToRead,omitempty" tf:"max_columns_to_read,omitempty"`

-// The maximum number of concurrent requests per user. Default value: 0 (no limit).
-// +kubebuilder:validation:Optional
-MaxConcurrentQueriesForUser *float64 `json:"maxConcurrentQueriesForUser,omitempty" tf:"max_concurrent_queries_for_user,omitempty"`
+	// The maximum number of concurrent requests per user. Default value: 0 (no limit).
+	// +kubebuilder:validation:Optional
+	MaxConcurrentQueriesForUser *float64 `json:"maxConcurrentQueriesForUser,omitempty" tf:"max_concurrent_queries_for_user,omitempty"`

-// Limits the maximum query execution time in milliseconds.
-// +kubebuilder:validation:Optional
-MaxExecutionTime *float64 `json:"maxExecutionTime,omitempty" tf:"max_execution_time,omitempty"`
+	// Limits the maximum query execution time in milliseconds.
+	// +kubebuilder:validation:Optional
+	MaxExecutionTime *float64 `json:"maxExecutionTime,omitempty" tf:"max_execution_time,omitempty"`

-// Maximum abstract syntax tree depth after after expansion of aliases.
-// +kubebuilder:validation:Optional
-MaxExpandedAstElements *float64 `json:"maxExpandedAstElements,omitempty" tf:"max_expanded_ast_elements,omitempty"`
+	// Maximum abstract syntax tree depth after expansion of aliases.
+	// +kubebuilder:validation:Optional
+	MaxExpandedAstElements *float64 `json:"maxExpandedAstElements,omitempty" tf:"max_expanded_ast_elements,omitempty"`

-// Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier.
-// +kubebuilder:validation:Optional
-MaxFinalThreads *float64 `json:"maxFinalThreads,omitempty" tf:"max_final_threads,omitempty"`
+	// Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier.
+	// +kubebuilder:validation:Optional
+	MaxFinalThreads *float64 `json:"maxFinalThreads,omitempty" tf:"max_final_threads,omitempty"`

-// Limits the maximum number of HTTP GET redirect hops for URL-engine tables.
-// +kubebuilder:validation:Optional
-MaxHTTPGetRedirects *float64 `json:"maxHttpGetRedirects,omitempty" tf:"max_http_get_redirects,omitempty"`
+	// Limits the maximum number of HTTP GET redirect hops for URL-engine tables.
+	// +kubebuilder:validation:Optional
+	MaxHTTPGetRedirects *float64 `json:"maxHttpGetRedirects,omitempty" tf:"max_http_get_redirects,omitempty"`

-// The size of blocks (in a count of rows) to form for insertion into a table.
-// +kubebuilder:validation:Optional
-MaxInsertBlockSize *float64 `json:"maxInsertBlockSize,omitempty" tf:"max_insert_block_size,omitempty"`
+	// The size of blocks (in a count of rows) to form for insertion into a table.
+	// +kubebuilder:validation:Optional
+	MaxInsertBlockSize *float64 `json:"maxInsertBlockSize,omitempty" tf:"max_insert_block_size,omitempty"`

-// The maximum number of threads to execute the INSERT SELECT query. Default value: 0.
-// +kubebuilder:validation:Optional
-MaxInsertThreads *float64 `json:"maxInsertThreads,omitempty" tf:"max_insert_threads,omitempty"`
+	// The maximum number of threads to execute the INSERT SELECT query. Default value: 0.
+	// +kubebuilder:validation:Optional
+	MaxInsertThreads *float64 `json:"maxInsertThreads,omitempty" tf:"max_insert_threads,omitempty"`

-// Limits the maximum memory usage (in bytes) for processing queries on a single server.
-// +kubebuilder:validation:Optional
-MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"`
+	// Limits the maximum memory usage (in bytes) for processing queries on a single server.
+	// +kubebuilder:validation:Optional
+	MaxMemoryUsage *float64 `json:"maxMemoryUsage,omitempty" tf:"max_memory_usage,omitempty"`

-// Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
-// +kubebuilder:validation:Optional
-MaxMemoryUsageForUser *float64 `json:"maxMemoryUsageForUser,omitempty" tf:"max_memory_usage_for_user,omitempty"`
+	// Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
+	// +kubebuilder:validation:Optional
+	MaxMemoryUsageForUser *float64 `json:"maxMemoryUsageForUser,omitempty" tf:"max_memory_usage_for_user,omitempty"`

-// Limits the speed of the data exchange over the network in bytes per second.
-// +kubebuilder:validation:Optional
-MaxNetworkBandwidth *float64 `json:"maxNetworkBandwidth,omitempty" tf:"max_network_bandwidth,omitempty"`
+	// Limits the speed of the data exchange over the network in bytes per second.
+	// +kubebuilder:validation:Optional
+	MaxNetworkBandwidth *float64 `json:"maxNetworkBandwidth,omitempty" tf:"max_network_bandwidth,omitempty"`

-// Limits the speed of the data exchange over the network in bytes per second.
-// +kubebuilder:validation:Optional
-MaxNetworkBandwidthForUser *float64 `json:"maxNetworkBandwidthForUser,omitempty" tf:"max_network_bandwidth_for_user,omitempty"`
+	// Limits the speed of the data exchange over the network in bytes per second.
+	// +kubebuilder:validation:Optional
+	MaxNetworkBandwidthForUser *float64 `json:"maxNetworkBandwidthForUser,omitempty" tf:"max_network_bandwidth_for_user,omitempty"`

-// Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited.
-// +kubebuilder:validation:Optional
-MaxParserDepth *float64 `json:"maxParserDepth,omitempty" tf:"max_parser_depth,omitempty"`
+	// Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited.
+	// +kubebuilder:validation:Optional
+	MaxParserDepth *float64 `json:"maxParserDepth,omitempty" tf:"max_parser_depth,omitempty"`

-// The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
-// +kubebuilder:validation:Optional
-MaxQuerySize *float64 `json:"maxQuerySize,omitempty" tf:"max_query_size,omitempty"`
+	// The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
+	// +kubebuilder:validation:Optional
+	MaxQuerySize *float64 `json:"maxQuerySize,omitempty" tf:"max_query_size,omitempty"`

-// The maximum size of the buffer to read from the filesystem.
-// +kubebuilder:validation:Optional
-MaxReadBufferSize *float64 `json:"maxReadBufferSize,omitempty" tf:"max_read_buffer_size,omitempty"`
+	// The maximum size of the buffer to read from the filesystem.
+	// +kubebuilder:validation:Optional
+	MaxReadBufferSize *float64 `json:"maxReadBufferSize,omitempty" tf:"max_read_buffer_size,omitempty"`

-// Disables lagging replicas for distributed queries.
-// +kubebuilder:validation:Optional
-MaxReplicaDelayForDistributedQueries *float64 `json:"maxReplicaDelayForDistributedQueries,omitempty" tf:"max_replica_delay_for_distributed_queries,omitempty"`
+	// Disables lagging replicas for distributed queries.
+	// +kubebuilder:validation:Optional
+	MaxReplicaDelayForDistributedQueries *float64 `json:"maxReplicaDelayForDistributedQueries,omitempty" tf:"max_replica_delay_for_distributed_queries,omitempty"`

-// Limits the number of bytes in the result.
-// +kubebuilder:validation:Optional
-MaxResultBytes *float64 `json:"maxResultBytes,omitempty" tf:"max_result_bytes,omitempty"`
+	// Limits the number of bytes in the result.
+	// +kubebuilder:validation:Optional
+	MaxResultBytes *float64 `json:"maxResultBytes,omitempty" tf:"max_result_bytes,omitempty"`

-// Limits the number of rows in the result.
-// +kubebuilder:validation:Optional
-MaxResultRows *float64 `json:"maxResultRows,omitempty" tf:"max_result_rows,omitempty"`
+	// Limits the number of rows in the result.
+	// +kubebuilder:validation:Optional
+	MaxResultRows *float64 `json:"maxResultRows,omitempty" tf:"max_result_rows,omitempty"`

-// Limits the maximum number of different rows when using DISTINCT.
-// +kubebuilder:validation:Optional
-MaxRowsInDistinct *float64 `json:"maxRowsInDistinct,omitempty" tf:"max_rows_in_distinct,omitempty"`
+	// Limits the maximum number of different rows when using DISTINCT.
+	// +kubebuilder:validation:Optional
+	MaxRowsInDistinct *float64 `json:"maxRowsInDistinct,omitempty" tf:"max_rows_in_distinct,omitempty"`

-// Limit on maximum size of the hash table for JOIN, in rows.
-// +kubebuilder:validation:Optional
-MaxRowsInJoin *float64 `json:"maxRowsInJoin,omitempty" tf:"max_rows_in_join,omitempty"`
+	// Limit on maximum size of the hash table for JOIN, in rows.
+	// +kubebuilder:validation:Optional
+	MaxRowsInJoin *float64 `json:"maxRowsInJoin,omitempty" tf:"max_rows_in_join,omitempty"`

-// Limit on the number of rows in the set resulting from the execution of the IN section.
-// +kubebuilder:validation:Optional
-MaxRowsInSet *float64 `json:"maxRowsInSet,omitempty" tf:"max_rows_in_set,omitempty"`
+	// Limit on the number of rows in the set resulting from the execution of the IN section.
+	// +kubebuilder:validation:Optional
+	MaxRowsInSet *float64 `json:"maxRowsInSet,omitempty" tf:"max_rows_in_set,omitempty"`

-// Limits the maximum number of unique keys received from aggregation function.
-// +kubebuilder:validation:Optional
-MaxRowsToGroupBy *float64 `json:"maxRowsToGroupBy,omitempty" tf:"max_rows_to_group_by,omitempty"`
+	// Limits the maximum number of unique keys received from aggregation function.
+	// +kubebuilder:validation:Optional
+	MaxRowsToGroupBy *float64 `json:"maxRowsToGroupBy,omitempty" tf:"max_rows_to_group_by,omitempty"`

-// Limits the maximum number of rows that can be read from a table when running a query.
-// +kubebuilder:validation:Optional
-MaxRowsToRead *float64 `json:"maxRowsToRead,omitempty" tf:"max_rows_to_read,omitempty"`
+	// Limits the maximum number of rows that can be read from a table when running a query.
+	// +kubebuilder:validation:Optional
+	MaxRowsToRead *float64 `json:"maxRowsToRead,omitempty" tf:"max_rows_to_read,omitempty"`

-// Limits the maximum number of rows that can be read from a table for sorting.
-// +kubebuilder:validation:Optional
-MaxRowsToSort *float64 `json:"maxRowsToSort,omitempty" tf:"max_rows_to_sort,omitempty"`
+	// Limits the maximum number of rows that can be read from a table for sorting.
+	// +kubebuilder:validation:Optional
+	MaxRowsToSort *float64 `json:"maxRowsToSort,omitempty" tf:"max_rows_to_sort,omitempty"`

-// Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
-// +kubebuilder:validation:Optional
-MaxRowsToTransfer *float64 `json:"maxRowsToTransfer,omitempty" tf:"max_rows_to_transfer,omitempty"`
+	// Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
+	// +kubebuilder:validation:Optional
+	MaxRowsToTransfer *float64 `json:"maxRowsToTransfer,omitempty" tf:"max_rows_to_transfer,omitempty"`

-// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
-// +kubebuilder:validation:Optional
-MaxTemporaryColumns *float64 `json:"maxTemporaryColumns,omitempty" tf:"max_temporary_columns,omitempty"`
+	// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
+	// +kubebuilder:validation:Optional
+	MaxTemporaryColumns *float64 `json:"maxTemporaryColumns,omitempty" tf:"max_temporary_columns,omitempty"`

-// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited.
-// +kubebuilder:validation:Optional
-MaxTemporaryDataOnDiskSizeForQuery *float64 `json:"maxTemporaryDataOnDiskSizeForQuery,omitempty" tf:"max_temporary_data_on_disk_size_for_query,omitempty"`
+	// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited.
+	// +kubebuilder:validation:Optional
+	MaxTemporaryDataOnDiskSizeForQuery *float64 `json:"maxTemporaryDataOnDiskSizeForQuery,omitempty" tf:"max_temporary_data_on_disk_size_for_query,omitempty"`

-// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited.
-// +kubebuilder:validation:Optional
-MaxTemporaryDataOnDiskSizeForUser *float64 `json:"maxTemporaryDataOnDiskSizeForUser,omitempty" tf:"max_temporary_data_on_disk_size_for_user,omitempty"`
+	// The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited.
+	// +kubebuilder:validation:Optional
+	MaxTemporaryDataOnDiskSizeForUser *float64 `json:"maxTemporaryDataOnDiskSizeForUser,omitempty" tf:"max_temporary_data_on_disk_size_for_user,omitempty"`

-// Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
+	// +kubebuilder:validation:Optional
+	MaxTemporaryNonConstColumns *float64 `json:"maxTemporaryNonConstColumns,omitempty" tf:"max_temporary_non_const_columns,omitempty"`

-// The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
-// +kubebuilder:validation:Optional
-MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"`
+	// The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
+	// +kubebuilder:validation:Optional
+	MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"`

-// It represents soft memory limit in case when hard limit is reached on user level. This value is used to compute overcommit ratio for the query. Zero means skip the query.
-// +kubebuilder:validation:Optional
-MemoryOvercommitRatioDenominator *float64 `json:"memoryOvercommitRatioDenominator,omitempty" tf:"memory_overcommit_ratio_denominator,omitempty"`
+	// It represents the soft memory limit when the hard limit is reached on the user level. This value is used to compute the overcommit ratio for the query. Zero means skip the query.
+	// +kubebuilder:validation:Optional
+	MemoryOvercommitRatioDenominator *float64 `json:"memoryOvercommitRatioDenominator,omitempty" tf:"memory_overcommit_ratio_denominator,omitempty"`

-// It represents soft memory limit in case when hard limit is reached on global level. This value is used to compute overcommit ratio for the query. Zero means skip the query.
-// +kubebuilder:validation:Optional
-MemoryOvercommitRatioDenominatorForUser *float64 `json:"memoryOvercommitRatioDenominatorForUser,omitempty" tf:"memory_overcommit_ratio_denominator_for_user,omitempty"`
+	// It represents the soft memory limit when the hard limit is reached on the global level. This value is used to compute the overcommit ratio for the query. Zero means skip the query.
+	// +kubebuilder:validation:Optional
+	MemoryOvercommitRatioDenominatorForUser *float64 `json:"memoryOvercommitRatioDenominatorForUser,omitempty" tf:"memory_overcommit_ratio_denominator_for_user,omitempty"`

-// Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Possible values: from 0 to 1. Default: 0.
-// +kubebuilder:validation:Optional
-MemoryProfilerSampleProbability *float64 `json:"memoryProfilerSampleProbability,omitempty" tf:"memory_profiler_sample_probability,omitempty"`
+	// Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability applies to every alloc/free regardless of the size of the allocation. Possible values: from 0 to 1. Default: 0.
+	// +kubebuilder:validation:Optional
+	MemoryProfilerSampleProbability *float64 `json:"memoryProfilerSampleProbability,omitempty" tf:"memory_profiler_sample_probability,omitempty"`

-// Memory profiler step (in bytes). If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. Default value: 4194304 (4 MB). Zero means disabled memory profiler.
-// +kubebuilder:validation:Optional
-MemoryProfilerStep *float64 `json:"memoryProfilerStep,omitempty" tf:"memory_profiler_step,omitempty"`
+	// Memory profiler step (in bytes). If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing. Default value: 4194304 (4 MB). Zero means the memory profiler is disabled.
+	// +kubebuilder:validation:Optional
+	MemoryProfilerStep *float64 `json:"memoryProfilerStep,omitempty" tf:"memory_profiler_step,omitempty"`

-// Maximum time thread will wait for memory to be freed in the case of memory overcommit on a user level. If the timeout is reached and memory is not freed, an exception is thrown.
-// +kubebuilder:validation:Optional
-MemoryUsageOvercommitMaxWaitMicroseconds *float64 `json:"memoryUsageOvercommitMaxWaitMicroseconds,omitempty" tf:"memory_usage_overcommit_max_wait_microseconds,omitempty"`
+	// Maximum time a thread will wait for memory to be freed in the case of memory overcommit on a user level. If the timeout is reached and memory is not freed, an exception is thrown.
+	// +kubebuilder:validation:Optional
+	MemoryUsageOvercommitMaxWaitMicroseconds *float64 `json:"memoryUsageOvercommitMaxWaitMicroseconds,omitempty" tf:"memory_usage_overcommit_max_wait_microseconds,omitempty"`

-// If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
-// +kubebuilder:validation:Optional
-MergeTreeMaxBytesToUseCache *float64 `json:"mergeTreeMaxBytesToUseCache,omitempty" tf:"merge_tree_max_bytes_to_use_cache,omitempty"`
+	// If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
+	// +kubebuilder:validation:Optional
+	MergeTreeMaxBytesToUseCache *float64 `json:"mergeTreeMaxBytesToUseCache,omitempty" tf:"merge_tree_max_bytes_to_use_cache,omitempty"`

-// If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
-// +kubebuilder:validation:Optional
-MergeTreeMaxRowsToUseCache *float64 `json:"mergeTreeMaxRowsToUseCache,omitempty" tf:"merge_tree_max_rows_to_use_cache,omitempty"`
+	// If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
+	// +kubebuilder:validation:Optional
+	MergeTreeMaxRowsToUseCache *float64 `json:"mergeTreeMaxRowsToUseCache,omitempty" tf:"merge_tree_max_rows_to_use_cache,omitempty"`

-// If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
-// +kubebuilder:validation:Optional
-MergeTreeMinBytesForConcurrentRead *float64 `json:"mergeTreeMinBytesForConcurrentRead,omitempty" tf:"merge_tree_min_bytes_for_concurrent_read,omitempty"`
+	// If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
+	// +kubebuilder:validation:Optional
+	MergeTreeMinBytesForConcurrentRead *float64 `json:"mergeTreeMinBytesForConcurrentRead,omitempty" tf:"merge_tree_min_bytes_for_concurrent_read,omitempty"`

-// If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
-// +kubebuilder:validation:Optional
-MergeTreeMinRowsForConcurrentRead *float64 `json:"mergeTreeMinRowsForConcurrentRead,omitempty" tf:"merge_tree_min_rows_for_concurrent_read,omitempty"`
+	// If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
+	// +kubebuilder:validation:Optional
+	MergeTreeMinRowsForConcurrentRead *float64 `json:"mergeTreeMinRowsForConcurrentRead,omitempty" tf:"merge_tree_min_rows_for_concurrent_read,omitempty"`

-// The minimum data volume required for using direct I/O access to the storage disk.
-// +kubebuilder:validation:Optional
-MinBytesToUseDirectIo *float64 `json:"minBytesToUseDirectIo,omitempty" tf:"min_bytes_to_use_direct_io,omitempty"`
+	// The minimum data volume required for using direct I/O access to the storage disk.
+	// +kubebuilder:validation:Optional
+	MinBytesToUseDirectIo *float64 `json:"minBytesToUseDirectIo,omitempty" tf:"min_bytes_to_use_direct_io,omitempty"`

-// How many times to potentially use a compiled chunk of code before running compilation.
-// +kubebuilder:validation:Optional
-MinCountToCompile *float64 `json:"minCountToCompile,omitempty" tf:"min_count_to_compile,omitempty"`
+	// How many times to potentially use a compiled chunk of code before running compilation.
+	// +kubebuilder:validation:Optional
+	MinCountToCompile *float64 `json:"minCountToCompile,omitempty" tf:"min_count_to_compile,omitempty"`

-// A query waits for expression compilation process to complete prior to continuing execution.
-// +kubebuilder:validation:Optional
-MinCountToCompileExpression *float64 `json:"minCountToCompileExpression,omitempty" tf:"min_count_to_compile_expression,omitempty"`
+	// A query waits for expression compilation process to complete prior to continuing execution.
+	// +kubebuilder:validation:Optional
+	MinCountToCompileExpression *float64 `json:"minCountToCompileExpression,omitempty" tf:"min_count_to_compile_expression,omitempty"`

-// Minimal execution speed in rows per second.
-// +kubebuilder:validation:Optional
-MinExecutionSpeed *float64 `json:"minExecutionSpeed,omitempty" tf:"min_execution_speed,omitempty"`
+	// Minimal execution speed in rows per second.
+	// +kubebuilder:validation:Optional
+	MinExecutionSpeed *float64 `json:"minExecutionSpeed,omitempty" tf:"min_execution_speed,omitempty"`

-// Minimal execution speed in bytes per second.
-// +kubebuilder:validation:Optional
-MinExecutionSpeedBytes *float64 `json:"minExecutionSpeedBytes,omitempty" tf:"min_execution_speed_bytes,omitempty"`
+	// Minimal execution speed in bytes per second.
+	// +kubebuilder:validation:Optional
+	MinExecutionSpeedBytes *float64 `json:"minExecutionSpeedBytes,omitempty" tf:"min_execution_speed_bytes,omitempty"`

-// Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
-// +kubebuilder:validation:Optional
-MinInsertBlockSizeBytes *float64 `json:"minInsertBlockSizeBytes,omitempty" tf:"min_insert_block_size_bytes,omitempty"`
+	// Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
+	// +kubebuilder:validation:Optional
+	MinInsertBlockSizeBytes *float64 `json:"minInsertBlockSizeBytes,omitempty" tf:"min_insert_block_size_bytes,omitempty"`

-// Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
-// +kubebuilder:validation:Optional
-MinInsertBlockSizeRows *float64 `json:"minInsertBlockSizeRows,omitempty" tf:"min_insert_block_size_rows,omitempty"`
+	// Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
+	// +kubebuilder:validation:Optional
+	MinInsertBlockSizeRows *float64 `json:"minInsertBlockSizeRows,omitempty" tf:"min_insert_block_size_rows,omitempty"`

-// If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
-// +kubebuilder:validation:Optional
-OutputFormatJSONQuote64BitIntegers *bool `json:"outputFormatJsonQuote64BitIntegers,omitempty" tf:"output_format_json_quote_64bit_integers,omitempty"`
+	// If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
+	// +kubebuilder:validation:Optional
+	OutputFormatJSONQuote64BitIntegers *bool `json:"outputFormatJsonQuote64BitIntegers,omitempty" tf:"output_format_json_quote_64bit_integers,omitempty"`

-// Enables +nan, -nan, +inf, -inf outputs in JSON output format.
-// +kubebuilder:validation:Optional
-OutputFormatJSONQuoteDenormals *bool `json:"outputFormatJsonQuoteDenormals,omitempty" tf:"output_format_json_quote_denormals,omitempty"`
+	// Enables +nan, -nan, +inf, -inf outputs in JSON output format.
+	// +kubebuilder:validation:Optional
+	OutputFormatJSONQuoteDenormals *bool `json:"outputFormatJsonQuoteDenormals,omitempty" tf:"output_format_json_quote_denormals,omitempty"`

-// Enables/disables preferable using the localhost replica when processing distributed queries. Default value: true.
-// +kubebuilder:validation:Optional
-PreferLocalhostReplica *bool `json:"preferLocalhostReplica,omitempty" tf:"prefer_localhost_replica,omitempty"`
+	// Enables/disables preferring the localhost replica when processing distributed queries. Default value: true.
+	// +kubebuilder:validation:Optional
+	PreferLocalhostReplica *bool `json:"preferLocalhostReplica,omitempty" tf:"prefer_localhost_replica,omitempty"`

-// Query priority.
-// +kubebuilder:validation:Optional
-Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+	// Query priority.
+	// +kubebuilder:validation:Optional
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

-// Quota accounting mode.
-// +kubebuilder:validation:Optional
-QuotaMode *string `json:"quotaMode,omitempty" tf:"quota_mode,omitempty"`
+	// Quota accounting mode.
+	// +kubebuilder:validation:Optional
+	QuotaMode *string `json:"quotaMode,omitempty" tf:"quota_mode,omitempty"`

-// Sets behaviour on overflow while read. Possible values:
-// +kubebuilder:validation:Optional
-ReadOverflowMode *string `json:"readOverflowMode,omitempty" tf:"read_overflow_mode,omitempty"`
+	// Sets behaviour on overflow while reading. Possible values:
+	// +kubebuilder:validation:Optional
+	ReadOverflowMode *string `json:"readOverflowMode,omitempty" tf:"read_overflow_mode,omitempty"`

-// Restricts permissions for reading data, write data and change settings queries.
-// +kubebuilder:validation:Optional
-Readonly *float64 `json:"readonly,omitempty" tf:"readonly,omitempty"`
+	// Restricts permissions for reading data, writing data, and changing settings queries.
+ // +kubebuilder:validation:Optional + Readonly *float64 `json:"readonly,omitempty" tf:"readonly,omitempty"` -// Receive timeout in milliseconds on the socket used for communicating with the client. -// +kubebuilder:validation:Optional -ReceiveTimeout *float64 `json:"receiveTimeout,omitempty" tf:"receive_timeout,omitempty"` + // Receive timeout in milliseconds on the socket used for communicating with the client. + // +kubebuilder:validation:Optional + ReceiveTimeout *float64 `json:"receiveTimeout,omitempty" tf:"receive_timeout,omitempty"` -// Method of reading data from remote filesystem, one of: read, threadpool. -// +kubebuilder:validation:Optional -RemoteFilesystemReadMethod *string `json:"remoteFilesystemReadMethod,omitempty" tf:"remote_filesystem_read_method,omitempty"` + // Method of reading data from remote filesystem, one of: read, threadpool. + // +kubebuilder:validation:Optional + RemoteFilesystemReadMethod *string `json:"remoteFilesystemReadMethod,omitempty" tf:"remote_filesystem_read_method,omitempty"` -// For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting. -// +kubebuilder:validation:Optional -ReplicationAlterPartitionsSync *float64 `json:"replicationAlterPartitionsSync,omitempty" tf:"replication_alter_partitions_sync,omitempty"` + // For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting. + // +kubebuilder:validation:Optional + ReplicationAlterPartitionsSync *float64 `json:"replicationAlterPartitionsSync,omitempty" tf:"replication_alter_partitions_sync,omitempty"` -// Sets behaviour on overflow in result. Possible values: -// +kubebuilder:validation:Optional -ResultOverflowMode *string `json:"resultOverflowMode,omitempty" tf:"result_overflow_mode,omitempty"` + // Sets behaviour on overflow in result. Possible values: + // +kubebuilder:validation:Optional + ResultOverflowMode *string `json:"resultOverflowMode,omitempty" tf:"result_overflow_mode,omitempty"` -// Enables or disables sequential consistency for SELECT queries. -// +kubebuilder:validation:Optional -SelectSequentialConsistency *bool `json:"selectSequentialConsistency,omitempty" tf:"select_sequential_consistency,omitempty"` + // Enables or disables sequential consistency for SELECT queries. + // +kubebuilder:validation:Optional + SelectSequentialConsistency *bool `json:"selectSequentialConsistency,omitempty" tf:"select_sequential_consistency,omitempty"` -// Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses. -// +kubebuilder:validation:Optional -SendProgressInHTTPHeaders *bool `json:"sendProgressInHttpHeaders,omitempty" tf:"send_progress_in_http_headers,omitempty"` + // Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses. + // +kubebuilder:validation:Optional + SendProgressInHTTPHeaders *bool `json:"sendProgressInHttpHeaders,omitempty" tf:"send_progress_in_http_headers,omitempty"` -// Send timeout in milliseconds on the socket used for communicating with the client. -// +kubebuilder:validation:Optional -SendTimeout *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"` + // Send timeout in milliseconds on the socket used for communicating with the client. + // +kubebuilder:validation:Optional + SendTimeout *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"` -// Sets behaviour on overflow in the set resulting. 
Possible values: -// +kubebuilder:validation:Optional -SetOverflowMode *string `json:"setOverflowMode,omitempty" tf:"set_overflow_mode,omitempty"` + // Sets behaviour on overflow in the set resulting. Possible values: + // +kubebuilder:validation:Optional + SetOverflowMode *string `json:"setOverflowMode,omitempty" tf:"set_overflow_mode,omitempty"` -// Enables or disables silently skipping of unavailable shards. -// +kubebuilder:validation:Optional -SkipUnavailableShards *bool `json:"skipUnavailableShards,omitempty" tf:"skip_unavailable_shards,omitempty"` + // Enables or disables silently skipping of unavailable shards. + // +kubebuilder:validation:Optional + SkipUnavailableShards *bool `json:"skipUnavailableShards,omitempty" tf:"skip_unavailable_shards,omitempty"` -// Sets behaviour on overflow while sort. Possible values: -// +kubebuilder:validation:Optional -SortOverflowMode *string `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"` + // Sets behaviour on overflow while sort. Possible values: + // +kubebuilder:validation:Optional + SortOverflowMode *string `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"` -// Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less that specified in min_execution_speed parameter. Must be at least 1000. -// +kubebuilder:validation:Optional -TimeoutBeforeCheckingExecutionSpeed *float64 `json:"timeoutBeforeCheckingExecutionSpeed,omitempty" tf:"timeout_before_checking_execution_speed,omitempty"` + // Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less that specified in min_execution_speed parameter. Must be at least 1000. + // +kubebuilder:validation:Optional + TimeoutBeforeCheckingExecutionSpeed *float64 `json:"timeoutBeforeCheckingExecutionSpeed,omitempty" tf:"timeout_before_checking_execution_speed,omitempty"` -// Sets behaviour on overflow. Possible values: -// +kubebuilder:validation:Optional -TimeoutOverflowMode *string `json:"timeoutOverflowMode,omitempty" tf:"timeout_overflow_mode,omitempty"` + // Sets behaviour on overflow. Possible values: + // +kubebuilder:validation:Optional + TimeoutOverflowMode *string `json:"timeoutOverflowMode,omitempty" tf:"timeout_overflow_mode,omitempty"` -// Sets behaviour on overflow. Possible values: -// +kubebuilder:validation:Optional -TransferOverflowMode *string `json:"transferOverflowMode,omitempty" tf:"transfer_overflow_mode,omitempty"` + // Sets behaviour on overflow. Possible values: + // +kubebuilder:validation:Optional + TransferOverflowMode *string `json:"transferOverflowMode,omitempty" tf:"transfer_overflow_mode,omitempty"` -// Enables equality of NULL values for IN operator. -// +kubebuilder:validation:Optional -TransformNullIn *bool `json:"transformNullIn,omitempty" tf:"transform_null_in,omitempty"` + // Enables equality of NULL values for IN operator. + // +kubebuilder:validation:Optional + TransformNullIn *bool `json:"transformNullIn,omitempty" tf:"transform_null_in,omitempty"` -// Enables hedged requests logic for remote queries. It allows to establish many connections with different replicas for query. New connection is enabled in case existent connection(s) with replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. Query uses the first connection which send non empty progress packet (or data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. 
Queries with max_parallel_replicas > 1 are supported. Default value: true. -// +kubebuilder:validation:Optional -UseHedgedRequests *bool `json:"useHedgedRequests,omitempty" tf:"use_hedged_requests,omitempty"` + // Enables hedged requests logic for remote queries. It allows establishing many connections with different replicas for a query. A new connection is opened if the existing connection(s) with the replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. The query uses the first connection that sends a non-empty progress packet (or a data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. Queries with max_parallel_replicas > 1 are supported. Default value: true. + // +kubebuilder:validation:Optional + UseHedgedRequests *bool `json:"useHedgedRequests,omitempty" tf:"use_hedged_requests,omitempty"` -// Whether to use a cache of uncompressed blocks. -// +kubebuilder:validation:Optional -UseUncompressedCache *bool `json:"useUncompressedCache,omitempty" tf:"use_uncompressed_cache,omitempty"` + // Whether to use a cache of uncompressed blocks. + // +kubebuilder:validation:Optional + UseUncompressedCache *bool `json:"useUncompressedCache,omitempty" tf:"use_uncompressed_cache,omitempty"` -// Enables waiting for processing of asynchronous insertion. If enabled, server returns OK only after the data is inserted. -// +kubebuilder:validation:Optional -WaitForAsyncInsert *bool `json:"waitForAsyncInsert,omitempty" tf:"wait_for_async_insert,omitempty"` + // Enables waiting for processing of asynchronous insertion. If enabled, the server returns OK only after the data is inserted. + // +kubebuilder:validation:Optional + WaitForAsyncInsert *bool `json:"waitForAsyncInsert,omitempty" tf:"wait_for_async_insert,omitempty"` -// The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second). -// +kubebuilder:validation:Optional -WaitForAsyncInsertTimeout *float64 `json:"waitForAsyncInsertTimeout,omitempty" tf:"wait_for_async_insert_timeout,omitempty"` + // The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second). + // +kubebuilder:validation:Optional + WaitForAsyncInsertTimeout *float64 `json:"waitForAsyncInsertTimeout,omitempty" tf:"wait_for_async_insert_timeout,omitempty"` } - type ZookeeperInitParameters struct { - -// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below. -Resources []ZookeeperResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below. + Resources []ZookeeperResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` } - type ZookeeperObservation struct { - -// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below. -Resources []ZookeeperResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below. + Resources []ZookeeperResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` } - type ZookeeperParameters struct { - -// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
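// Editor's note: a sketch of how the optional settings above reach Terraform.
// Each field is a pointer tagged twice: `json` names the CRD field and `tf`
// names the Terraform attribute. upjet's TFParser (the same parser used by the
// *_terraformed.go files in this diff) marshals with the `tf` tags, so unset
// pointers drop out of the payload entirely. The struct below is hypothetical
// and only mirrors the shape of the generated fields:
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/crossplane/upjet/pkg/resource/json"
//	)
//
//	// settings is an illustrative stand-in for the generated settings struct.
//	type settings struct {
//		SortOverflowMode *string  `json:"sortOverflowMode,omitempty" tf:"sort_overflow_mode,omitempty"`
//		SendTimeout      *float64 `json:"sendTimeout,omitempty" tf:"send_timeout,omitempty"`
//	}
//
//	func main() {
//		mode := "break" // assumed valid overflow mode; "throw" is the other common one
//		b, _ := json.TFParser.Marshal(settings{SortOverflowMode: &mode})
//		fmt.Println(string(b)) // prints {"sort_overflow_mode":"break"}; SendTimeout is omitted
//	}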
-// +kubebuilder:validation:Optional -Resources []ZookeeperResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []ZookeeperResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"` } - type ZookeeperResourcesInitParameters struct { + // Volume of the storage available to a ZooKeeper host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a ZooKeeper host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + // Type of the storage of ZooKeeper hosts. For more information see the official documentation. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// Type of the storage of ZooKeeper hosts. For more information see the official documentation. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type ZookeeperResourcesObservation struct { + // Volume of the storage available to a ZooKeeper host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a ZooKeeper host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of ZooKeeper hosts. For more information see the official documentation. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of ZooKeeper hosts. For more information see the official documentation. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type ZookeeperResourcesParameters struct { + // Volume of the storage available to a ZooKeeper host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a ZooKeeper host, in gigabytes. -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of ZooKeeper hosts. For more information see the official documentation. -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of ZooKeeper hosts. For more information see the official documentation. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } // ClickhouseClusterSpec defines the desired state of ClickhouseCluster type ClickhouseClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ClickhouseClusterParameters `json:"forProvider"` + ForProvider ClickhouseClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. 
It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -4097,20 +3898,19 @@ type ClickhouseClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider ClickhouseClusterInitParameters `json:"initProvider,omitempty"` + InitProvider ClickhouseClusterInitParameters `json:"initProvider,omitempty"` } // ClickhouseClusterStatus defines the observed state of ClickhouseCluster. type ClickhouseClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ClickhouseClusterObservation `json:"atProvider,omitempty"` + AtProvider ClickhouseClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // ClickhouseCluster is the Schema for the ClickhouseClusters API. Manages a ClickHouse cluster within Yandex.Cloud. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -4120,11 +3920,11 @@ type ClickhouseClusterStatus struct { type ClickhouseCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec ClickhouseClusterSpec `json:"spec"` - Status ClickhouseClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ClickhouseClusterSpec `json:"spec"` + Status ClickhouseClusterStatus `json:"status,omitempty"` } //
+kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_elasticsearchcluster_terraformed.go b/apis/mdb/v1alpha1/zz_elasticsearchcluster_terraformed.go new file mode 100755 index 0000000..4e6c78b --- /dev/null +++ b/apis/mdb/v1alpha1/zz_elasticsearchcluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ElasticsearchCluster +func (mg *ElasticsearchCluster) GetTerraformResourceType() string { + return "yandex_mdb_elasticsearch_cluster" +} + +// GetConnectionDetailsMapping for this ElasticsearchCluster +func (tr *ElasticsearchCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"config[*].admin_password": "config[*].adminPasswordSecretRef"} +} + +// GetObservation of this ElasticsearchCluster +func (tr *ElasticsearchCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ElasticsearchCluster +func (tr *ElasticsearchCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ElasticsearchCluster +func (tr *ElasticsearchCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ElasticsearchCluster +func (tr *ElasticsearchCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ElasticsearchCluster +func (tr *ElasticsearchCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ElasticsearchCluster +func (tr *ElasticsearchCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) + +// GetMergedParameters of this ElasticsearchCluster +func (tr *ElasticsearchCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
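// Editor's note: what the merge below does, shown as a self-contained sketch
// with illustrative values (only dario.cat/mergo from the imports above is
// used). mergo.WithSliceDeepCopy deep-copies slices from src into dst, but it
// also flips Overwrite on, so the extra option switches Overwrite back off:
// values already present in forProvider must win over initProvider.
//
//	forProvider := map[string]any{"name": "prod", "labels": []any{"team-a"}}
//	initProvider := map[string]any{"name": "ignored", "description": "set at create time"}
//	_ = mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
//		c.Overwrite = false
//	})
//	// forProvider now holds {"name": "prod", "labels": ["team-a"], "description": "set at create time"}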
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ElasticsearchCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ElasticsearchCluster) LateInitialize(attrs []byte) (bool, error) { + params := &ElasticsearchClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ElasticsearchCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_elasticsearchcluster_types.go b/apis/mdb/v1alpha1/zz_elasticsearchcluster_types.go index ec674e7..a7e734b 100755 --- a/apis/mdb/v1alpha1/zz_elasticsearchcluster_types.go +++ b/apis/mdb/v1alpha1/zz_elasticsearchcluster_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,576 +7,525 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type DataNodeInitParameters struct { - -// Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. -Resources []DataNodeResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the Elasticsearch data nodes subcluster. The structure is documented below. + Resources []DataNodeResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` } - type DataNodeObservation struct { - -// Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. -Resources []DataNodeResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the Elasticsearch data nodes subcluster. The structure is documented below. + Resources []DataNodeResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` } - type DataNodeParameters struct { - -// Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -Resources []DataNodeResourcesParameters `json:"resources" tf:"resources,omitempty"` + // Resources allocated to hosts of the Elasticsearch data nodes subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []DataNodeResourcesParameters `json:"resources" tf:"resources,omitempty"` } - type DataNodeResourcesInitParameters struct { + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of Elasticsearch hosts. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of Elasticsearch hosts.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type DataNodeResourcesObservation struct { + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of Elasticsearch hosts. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of Elasticsearch hosts. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type DataNodeResourcesParameters struct { + // Volume of the storage available to a host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` - -// Type of the storage of Elasticsearch hosts. -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` + // Type of the storage of Elasticsearch hosts. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` } - type ElasticsearchClusterConfigInitParameters struct { + // Password for admin user of Elasticsearch. + AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` -// Password for admin user of Elasticsearch. -AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` - -// Configuration for Elasticsearch data nodes subcluster. The structure is documented below. -DataNode []DataNodeInitParameters `json:"dataNode,omitempty" tf:"data_node,omitempty"` + // Configuration for Elasticsearch data nodes subcluster. The structure is documented below. + DataNode []DataNodeInitParameters `json:"dataNode,omitempty" tf:"data_node,omitempty"` -// Edition of Elasticsearch. For more information, see the official documentation. -Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + // Edition of Elasticsearch. For more information, see the official documentation. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` -// Configuration for Elasticsearch master nodes subcluster. The structure is documented below. -MasterNode []MasterNodeInitParameters `json:"masterNode,omitempty" tf:"master_node,omitempty"` + // Configuration for Elasticsearch master nodes subcluster. The structure is documented below. + MasterNode []MasterNodeInitParameters `json:"masterNode,omitempty" tf:"master_node,omitempty"` -// A set of Elasticsearch plugins to install. -// +listType=set -Plugins []*string `json:"plugins,omitempty" tf:"plugins,omitempty"` + // A set of Elasticsearch plugins to install. 
+ // +listType=set + Plugins []*string `json:"plugins,omitempty" tf:"plugins,omitempty"` -// Version of Elasticsearch. -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version of Elasticsearch. + Version *string `json:"version,omitempty" tf:"version,omitempty"` } - type ElasticsearchClusterConfigObservation struct { + // Configuration for Elasticsearch data nodes subcluster. The structure is documented below. + DataNode []DataNodeObservation `json:"dataNode,omitempty" tf:"data_node,omitempty"` -// Configuration for Elasticsearch data nodes subcluster. The structure is documented below. -DataNode []DataNodeObservation `json:"dataNode,omitempty" tf:"data_node,omitempty"` - -// Edition of Elasticsearch. For more information, see the official documentation. -Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + // Edition of Elasticsearch. For more information, see the official documentation. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` -// Configuration for Elasticsearch master nodes subcluster. The structure is documented below. -MasterNode []MasterNodeObservation `json:"masterNode,omitempty" tf:"master_node,omitempty"` + // Configuration for Elasticsearch master nodes subcluster. The structure is documented below. + MasterNode []MasterNodeObservation `json:"masterNode,omitempty" tf:"master_node,omitempty"` -// A set of Elasticsearch plugins to install. -// +listType=set -Plugins []*string `json:"plugins,omitempty" tf:"plugins,omitempty"` + // A set of Elasticsearch plugins to install. + // +listType=set + Plugins []*string `json:"plugins,omitempty" tf:"plugins,omitempty"` -// Version of Elasticsearch. -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version of Elasticsearch. + Version *string `json:"version,omitempty" tf:"version,omitempty"` } - type ElasticsearchClusterConfigParameters struct { + // Password for admin user of Elasticsearch. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` -// Password for admin user of Elasticsearch. -// +kubebuilder:validation:Optional -AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` + // Configuration for Elasticsearch data nodes subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + DataNode []DataNodeParameters `json:"dataNode" tf:"data_node,omitempty"` -// Configuration for Elasticsearch data nodes subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -DataNode []DataNodeParameters `json:"dataNode" tf:"data_node,omitempty"` + // Edition of Elasticsearch. For more information, see the official documentation. + // +kubebuilder:validation:Optional + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` -// Edition of Elasticsearch. For more information, see the official documentation. -// +kubebuilder:validation:Optional -Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + // Configuration for Elasticsearch master nodes subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + MasterNode []MasterNodeParameters `json:"masterNode,omitempty" tf:"master_node,omitempty"` -// Configuration for Elasticsearch master nodes subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -MasterNode []MasterNodeParameters `json:"masterNode,omitempty" tf:"master_node,omitempty"` + // A set of Elasticsearch plugins to install. 
+ // +kubebuilder:validation:Optional + // +listType=set + Plugins []*string `json:"plugins,omitempty" tf:"plugins,omitempty"` -// A set of Elasticsearch plugins to install. -// +kubebuilder:validation:Optional -// +listType=set -Plugins []*string `json:"plugins,omitempty" tf:"plugins,omitempty"` - -// Version of Elasticsearch. -// +kubebuilder:validation:Optional -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version of Elasticsearch. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` } - type ElasticsearchClusterHostInitParameters struct { + // Sets whether the host should get a public IP address on creation. Can be either true or false. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// Sets whether the host should get a public IP address on creation. Can be either true or false. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // User defined host name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// User defined host name. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // The type of the host to be deployed. Can be either DATA_NODE or MASTER_NODE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// The type of the host to be deployed. Can be either DATA_NODE or MASTER_NODE. -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// The availability zone where the Elasticsearch host will be created. For more information see the official documentation. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // The availability zone where the Elasticsearch host will be created. For more information see the official documentation. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type ElasticsearchClusterHostObservation struct { + // Sets whether the host should get a public IP address on creation. Can be either true or false. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// Sets whether the host should get a public IP address on creation. Can be either true or false. 
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // The fully qualified domain name of the host. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` -// The fully qualified domain name of the host. -Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + // User defined host name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// User defined host name. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The type of the host to be deployed. Can be either DATA_NODE or MASTER_NODE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// The type of the host to be deployed. Can be either DATA_NODE or MASTER_NODE. -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// The availability zone where the Elasticsearch host will be created. For more information see the official documentation. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // The availability zone where the Elasticsearch host will be created. For more information see the official documentation. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type ElasticsearchClusterHostParameters struct { + // Sets whether the host should get a public IP address on creation. Can be either true or false. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// Sets whether the host should get a public IP address on creation. Can be either true or false. -// +kubebuilder:validation:Optional -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // User defined host name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// User defined host name. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. 
-// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // The type of the host to be deployed. Can be either DATA_NODE or MASTER_NODE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` -// The type of the host to be deployed. Can be either DATA_NODE or MASTER_NODE. -// +kubebuilder:validation:Optional -Type *string `json:"type" tf:"type,omitempty"` - -// The availability zone where the Elasticsearch host will be created. For more information see the official documentation. -// +kubebuilder:validation:Optional -Zone *string `json:"zone" tf:"zone,omitempty"` + // The availability zone where the Elasticsearch host will be created. For more information see the official documentation. + // +kubebuilder:validation:Optional + Zone *string `json:"zone" tf:"zone,omitempty"` } - type ElasticsearchClusterInitParameters struct { + // Configuration of the Elasticsearch cluster. The structure is documented below. + Config []ElasticsearchClusterConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` -// Configuration of the Elasticsearch cluster. The structure is documented below. -Config []ElasticsearchClusterConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` - -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the Elasticsearch cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Elasticsearch cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Deployment environment of the Elasticsearch cluster. Can be either PRESTABLE or PRODUCTION. -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Deployment environment of the Elasticsearch cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A host of the Elasticsearch cluster. The structure is documented below. -Host []ElasticsearchClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"` + // A host of the Elasticsearch cluster. The structure is documented below. + Host []ElasticsearchClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"` -// A set of key/value label pairs to assign to the Elasticsearch cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Elasticsearch cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -MaintenanceWindow []ElasticsearchClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + MaintenanceWindow []ElasticsearchClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// Name of the Elasticsearch cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Elasticsearch cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network, to which the Elasticsearch cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network, to which the Elasticsearch cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` -// A set of ids of security groups assigned to hosts of the cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + // References to SecurityGroup in vpc to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` -// ID of the service account authorized for this cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // ID of the service account authorized for this cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` } - type ElasticsearchClusterMaintenanceWindowInitParameters struct { + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type ElasticsearchClusterMaintenanceWindowObservation struct { + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. 
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type ElasticsearchClusterMaintenanceWindowParameters struct { + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + // +kubebuilder:validation:Optional + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. -// +kubebuilder:validation:Optional -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. -// +kubebuilder:validation:Optional -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -// +kubebuilder:validation:Optional -Type *string `json:"type" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` } - type ElasticsearchClusterObservation struct { + // Configuration of the Elasticsearch cluster. The structure is documented below. + Config []ElasticsearchClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"` -// Configuration of the Elasticsearch cluster. The structure is documented below. -Config []ElasticsearchClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"` - -// Creation timestamp of the key. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // Creation timestamp of the cluster. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the Elasticsearch cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Elasticsearch cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Deployment environment of the Elasticsearch cluster. Can be either PRESTABLE or PRODUCTION. -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Deployment environment of the Elasticsearch cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// The ID of the folder that the resource belongs to.
If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. -Health *string `json:"health,omitempty" tf:"health,omitempty"` + // Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. + Health *string `json:"health,omitempty" tf:"health,omitempty"` -// A host of the Elasticsearch cluster. The structure is documented below. -Host []ElasticsearchClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"` + // A host of the Elasticsearch cluster. The structure is documented below. + Host []ElasticsearchClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Elasticsearch cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Elasticsearch cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -MaintenanceWindow []ElasticsearchClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + MaintenanceWindow []ElasticsearchClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// Name of the Elasticsearch cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Elasticsearch cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network, to which the Elasticsearch cluster belongs. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network, to which the Elasticsearch cluster belongs. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// A set of ids of security groups assigned to hosts of the cluster. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A set of ids of security groups assigned to hosts of the cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// ID of the service account authorized for this cluster. -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // ID of the service account authorized for this cluster. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // Status of the cluster. 
Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. + Status *string `json:"status,omitempty" tf:"status,omitempty"` } - type ElasticsearchClusterParameters struct { - -// Configuration of the Elasticsearch cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Config []ElasticsearchClusterConfigParameters `json:"config,omitempty" tf:"config,omitempty"` - -// Inhibits deletion of the cluster. Can be either true or false. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - -// Description of the Elasticsearch cluster. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// Deployment environment of the Elasticsearch cluster. Can be either PRESTABLE or PRODUCTION. -// +kubebuilder:validation:Optional -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` - -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` - -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` - -// A host of the Elasticsearch cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Host []ElasticsearchClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"` - -// A set of key/value label pairs to assign to the Elasticsearch cluster. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// +kubebuilder:validation:Optional -MaintenanceWindow []ElasticsearchClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` - -// Name of the Elasticsearch cluster. Provided by the client when the cluster is created. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// ID of the network, to which the Elasticsearch cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` - -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` - -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` - -// A set of ids of security groups assigned to hosts of the cluster. 
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` - -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` - -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` - -// ID of the service account authorized for this cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` - -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` - -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Configuration of the Elasticsearch cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Config []ElasticsearchClusterConfigParameters `json:"config,omitempty" tf:"config,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Elasticsearch cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Deployment environment of the Elasticsearch cluster. Can be either PRESTABLE or PRODUCTION. + // +kubebuilder:validation:Optional + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A host of the Elasticsearch cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Host []ElasticsearchClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"` + + // A set of key/value label pairs to assign to the Elasticsearch cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // +kubebuilder:validation:Optional + MaintenanceWindow []ElasticsearchClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the Elasticsearch cluster. 
Provided by the client when the cluster is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the Elasticsearch cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + + // ID of the service account authorized for this cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` } - type MasterNodeInitParameters struct { - -// Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. -Resources []MasterNodeResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. + Resources []MasterNodeResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` } - type MasterNodeObservation struct { - -// Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. -Resources []MasterNodeResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. + Resources []MasterNodeResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` } - type MasterNodeParameters struct { - -// Resources allocated to hosts of the Elasticsearch master nodes subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -Resources []MasterNodeResourcesParameters `json:"resources" tf:"resources,omitempty"` + // Resources allocated to hosts of the Elasticsearch master nodes subcluster. 
The structure is documented below. + // +kubebuilder:validation:Optional + Resources []MasterNodeResourcesParameters `json:"resources" tf:"resources,omitempty"` } - type MasterNodeResourcesInitParameters struct { + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + // Type of the storage of Elasticsearch hosts. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// Type of the storage of Elasticsearch hosts. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type MasterNodeResourcesObservation struct { + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + // Type of the storage of Elasticsearch hosts. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -// Type of the storage of Elasticsearch hosts. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type MasterNodeResourcesParameters struct { + // Volume of the storage available to a host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` -// Volume of the storage available to a host, in gigabytes. -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` + // Type of the storage of Elasticsearch hosts. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` -// Type of the storage of Elasticsearch hosts. -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` - -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` } // ElasticsearchClusterSpec defines the desired state of ElasticsearchCluster type ElasticsearchClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ElasticsearchClusterParameters `json:"forProvider"` + ForProvider ElasticsearchClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -589,20 +536,19 @@ type ElasticsearchClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler.
- InitProvider ElasticsearchClusterInitParameters `json:"initProvider,omitempty"` + InitProvider ElasticsearchClusterInitParameters `json:"initProvider,omitempty"` } // ElasticsearchClusterStatus defines the observed state of ElasticsearchCluster. type ElasticsearchClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ElasticsearchClusterObservation `json:"atProvider,omitempty"` + AtProvider ElasticsearchClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // ElasticsearchCluster is the Schema for the ElasticsearchClusters API. Manages an Elasticsearch cluster within Yandex.Cloud. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -612,11 +558,11 @@ type ElasticsearchClusterStatus struct { type ElasticsearchCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec ElasticsearchClusterSpec `json:"spec"` - Status ElasticsearchClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ElasticsearchClusterSpec `json:"spec"` + Status ElasticsearchClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_generated.conversion_hubs.go b/apis/mdb/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..68e6f35 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,57 @@ +// Code generated by upjet. DO NOT EDIT.
+ +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *ClickhouseCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ElasticsearchCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GreenplumCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *KafkaCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *KafkaConnector) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *KafkaTopic) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *KafkaUser) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MongodbCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MongodbDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MongodbUser) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MySQLCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MySQLDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MySQLUser) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PostgresqlCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PostgresqlDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PostgresqlUser) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RedisCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SqlserverCluster) Hub() {} diff --git a/apis/mdb/v1alpha1/zz_generated.deepcopy.go b/apis/mdb/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..7f59048 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,27994 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessInitParameters) DeepCopyInto(out *AccessInitParameters) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } + if in.Metrika != nil { + in, out := &in.Metrika, &out.Metrika + *out = new(bool) + **out = **in + } + if in.Serverless != nil { + in, out := &in.Serverless, &out.Serverless + *out = new(bool) + **out = **in + } + if in.WebSQL != nil { + in, out := &in.WebSQL, &out.WebSQL + *out = new(bool) + **out = **in + } + if in.YandexQuery != nil { + in, out := &in.YandexQuery, &out.YandexQuery + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessInitParameters. +func (in *AccessInitParameters) DeepCopy() *AccessInitParameters { + if in == nil { + return nil + } + out := new(AccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessObservation) DeepCopyInto(out *AccessObservation) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } + if in.Metrika != nil { + in, out := &in.Metrika, &out.Metrika + *out = new(bool) + **out = **in + } + if in.Serverless != nil { + in, out := &in.Serverless, &out.Serverless + *out = new(bool) + **out = **in + } + if in.WebSQL != nil { + in, out := &in.WebSQL, &out.WebSQL + *out = new(bool) + **out = **in + } + if in.YandexQuery != nil { + in, out := &in.YandexQuery, &out.YandexQuery + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessObservation. +func (in *AccessObservation) DeepCopy() *AccessObservation { + if in == nil { + return nil + } + out := new(AccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessParameters) DeepCopyInto(out *AccessParameters) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } + if in.Metrika != nil { + in, out := &in.Metrika, &out.Metrika + *out = new(bool) + **out = **in + } + if in.Serverless != nil { + in, out := &in.Serverless, &out.Serverless + *out = new(bool) + **out = **in + } + if in.WebSQL != nil { + in, out := &in.WebSQL, &out.WebSQL + *out = new(bool) + **out = **in + } + if in.YandexQuery != nil { + in, out := &in.YandexQuery, &out.YandexQuery + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessParameters. +func (in *AccessParameters) DeepCopy() *AccessParameters { + if in == nil { + return nil + } + out := new(AccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyzeAndVacuumInitParameters) DeepCopyInto(out *AnalyzeAndVacuumInitParameters) { + *out = *in + if in.AnalyzeTimeout != nil { + in, out := &in.AnalyzeTimeout, &out.AnalyzeTimeout + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.VacuumTimeout != nil { + in, out := &in.VacuumTimeout, &out.VacuumTimeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzeAndVacuumInitParameters. +func (in *AnalyzeAndVacuumInitParameters) DeepCopy() *AnalyzeAndVacuumInitParameters { + if in == nil { + return nil + } + out := new(AnalyzeAndVacuumInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnalyzeAndVacuumObservation) DeepCopyInto(out *AnalyzeAndVacuumObservation) { + *out = *in + if in.AnalyzeTimeout != nil { + in, out := &in.AnalyzeTimeout, &out.AnalyzeTimeout + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.VacuumTimeout != nil { + in, out := &in.VacuumTimeout, &out.VacuumTimeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzeAndVacuumObservation. +func (in *AnalyzeAndVacuumObservation) DeepCopy() *AnalyzeAndVacuumObservation { + if in == nil { + return nil + } + out := new(AnalyzeAndVacuumObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyzeAndVacuumParameters) DeepCopyInto(out *AnalyzeAndVacuumParameters) { + *out = *in + if in.AnalyzeTimeout != nil { + in, out := &in.AnalyzeTimeout, &out.AnalyzeTimeout + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.VacuumTimeout != nil { + in, out := &in.VacuumTimeout, &out.VacuumTimeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzeAndVacuumParameters. +func (in *AnalyzeAndVacuumParameters) DeepCopy() *AnalyzeAndVacuumParameters { + if in == nil { + return nil + } + out := new(AnalyzeAndVacuumParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogInitParameters) DeepCopyInto(out *AuditLogInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RuntimeConfiguration != nil { + in, out := &in.RuntimeConfiguration, &out.RuntimeConfiguration + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogInitParameters. +func (in *AuditLogInitParameters) DeepCopy() *AuditLogInitParameters { + if in == nil { + return nil + } + out := new(AuditLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogObservation) DeepCopyInto(out *AuditLogObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RuntimeConfiguration != nil { + in, out := &in.RuntimeConfiguration, &out.RuntimeConfiguration + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogObservation. +func (in *AuditLogObservation) DeepCopy() *AuditLogObservation { + if in == nil { + return nil + } + out := new(AuditLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuditLogParameters) DeepCopyInto(out *AuditLogParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RuntimeConfiguration != nil { + in, out := &in.RuntimeConfiguration, &out.RuntimeConfiguration + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogParameters. +func (in *AuditLogParameters) DeepCopy() *AuditLogParameters { + if in == nil { + return nil + } + out := new(AuditLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackgroundActivitiesInitParameters) DeepCopyInto(out *BackgroundActivitiesInitParameters) { + *out = *in + if in.AnalyzeAndVacuum != nil { + in, out := &in.AnalyzeAndVacuum, &out.AnalyzeAndVacuum + *out = make([]AnalyzeAndVacuumInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerIdle != nil { + in, out := &in.QueryKillerIdle, &out.QueryKillerIdle + *out = make([]QueryKillerIdleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerIdleInTransaction != nil { + in, out := &in.QueryKillerIdleInTransaction, &out.QueryKillerIdleInTransaction + *out = make([]QueryKillerIdleInTransactionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerLongRunning != nil { + in, out := &in.QueryKillerLongRunning, &out.QueryKillerLongRunning + *out = make([]QueryKillerLongRunningInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackgroundActivitiesInitParameters. +func (in *BackgroundActivitiesInitParameters) DeepCopy() *BackgroundActivitiesInitParameters { + if in == nil { + return nil + } + out := new(BackgroundActivitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackgroundActivitiesObservation) DeepCopyInto(out *BackgroundActivitiesObservation) { + *out = *in + if in.AnalyzeAndVacuum != nil { + in, out := &in.AnalyzeAndVacuum, &out.AnalyzeAndVacuum + *out = make([]AnalyzeAndVacuumObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerIdle != nil { + in, out := &in.QueryKillerIdle, &out.QueryKillerIdle + *out = make([]QueryKillerIdleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerIdleInTransaction != nil { + in, out := &in.QueryKillerIdleInTransaction, &out.QueryKillerIdleInTransaction + *out = make([]QueryKillerIdleInTransactionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerLongRunning != nil { + in, out := &in.QueryKillerLongRunning, &out.QueryKillerLongRunning + *out = make([]QueryKillerLongRunningObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackgroundActivitiesObservation. 
+func (in *BackgroundActivitiesObservation) DeepCopy() *BackgroundActivitiesObservation { + if in == nil { + return nil + } + out := new(BackgroundActivitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackgroundActivitiesParameters) DeepCopyInto(out *BackgroundActivitiesParameters) { + *out = *in + if in.AnalyzeAndVacuum != nil { + in, out := &in.AnalyzeAndVacuum, &out.AnalyzeAndVacuum + *out = make([]AnalyzeAndVacuumParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerIdle != nil { + in, out := &in.QueryKillerIdle, &out.QueryKillerIdle + *out = make([]QueryKillerIdleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerIdleInTransaction != nil { + in, out := &in.QueryKillerIdleInTransaction, &out.QueryKillerIdleInTransaction + *out = make([]QueryKillerIdleInTransactionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryKillerLongRunning != nil { + in, out := &in.QueryKillerLongRunning, &out.QueryKillerLongRunning + *out = make([]QueryKillerLongRunningParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackgroundActivitiesParameters. +func (in *BackgroundActivitiesParameters) DeepCopy() *BackgroundActivitiesParameters { + if in == nil { + return nil + } + out := new(BackgroundActivitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupWindowStartInitParameters) DeepCopyInto(out *BackupWindowStartInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupWindowStartInitParameters. +func (in *BackupWindowStartInitParameters) DeepCopy() *BackupWindowStartInitParameters { + if in == nil { + return nil + } + out := new(BackupWindowStartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupWindowStartObservation) DeepCopyInto(out *BackupWindowStartObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupWindowStartObservation. +func (in *BackupWindowStartObservation) DeepCopy() *BackupWindowStartObservation { + if in == nil { + return nil + } + out := new(BackupWindowStartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupWindowStartParameters) DeepCopyInto(out *BackupWindowStartParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupWindowStartParameters. +func (in *BackupWindowStartParameters) DeepCopy() *BackupWindowStartParameters { + if in == nil { + return nil + } + out := new(BackupWindowStartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseCluster) DeepCopyInto(out *ClickhouseCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseCluster. +func (in *ClickhouseCluster) DeepCopy() *ClickhouseCluster { + if in == nil { + return nil + } + out := new(ClickhouseCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClickhouseCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseClusterInitParameters) DeepCopyInto(out *ClickhouseClusterInitParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]AccessInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BackupRetainPeriodDays != nil { + in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays + *out = new(float64) + **out = **in + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]BackupWindowStartInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Clickhouse != nil { + in, out := &in.Clickhouse, &out.Clickhouse + *out = make([]ClickhouseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = make([]CloudStorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CopySchemaOnNewHosts != nil { + in, out := &in.CopySchemaOnNewHosts, &out.CopySchemaOnNewHosts + *out = new(bool) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]DatabaseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EmbeddedKeeper != nil { + in, out 
:= &in.EmbeddedKeeper, &out.EmbeddedKeeper + *out = new(bool) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FormatSchema != nil { + in, out := &in.FormatSchema, &out.FormatSchema + *out = make([]FormatSchemaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]HostInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MLModel != nil { + in, out := &in.MLModel, &out.MLModel + *out = make([]MLModelInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SQLDatabaseManagement != nil { + in, out := &in.SQLDatabaseManagement, &out.SQLDatabaseManagement + *out = new(bool) + **out = **in + } + if in.SQLUserManagement != nil { + in, out := &in.SQLUserManagement, &out.SQLUserManagement + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make([]ShardInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardGroup != nil { + in, out := &in.ShardGroup, &out.ShardGroup + *out = make([]ShardGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]UserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zookeeper != nil { + in, out := &in.Zookeeper, &out.Zookeeper + *out = make([]ZookeeperInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseClusterInitParameters. +func (in *ClickhouseClusterInitParameters) DeepCopy() *ClickhouseClusterInitParameters { + if in == nil { + return nil + } + out := new(ClickhouseClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseClusterList) DeepCopyInto(out *ClickhouseClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClickhouseCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseClusterList. +func (in *ClickhouseClusterList) DeepCopy() *ClickhouseClusterList { + if in == nil { + return nil + } + out := new(ClickhouseClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClickhouseClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClickhouseClusterObservation) DeepCopyInto(out *ClickhouseClusterObservation) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]AccessObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupRetainPeriodDays != nil { + in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays + *out = new(float64) + **out = **in + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]BackupWindowStartObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Clickhouse != nil { + in, out := &in.Clickhouse, &out.Clickhouse + *out = make([]ClickhouseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = make([]CloudStorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CopySchemaOnNewHosts != nil { + in, out := &in.CopySchemaOnNewHosts, &out.CopySchemaOnNewHosts + *out = new(bool) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]DatabaseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EmbeddedKeeper != nil { + in, out := &in.EmbeddedKeeper, &out.EmbeddedKeeper + *out = new(bool) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FormatSchema != nil { + in, out := &in.FormatSchema, &out.FormatSchema + *out = make([]FormatSchemaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Health != nil { + in, out := &in.Health, &out.Health + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]HostObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MLModel != nil { + in, out := &in.MLModel, &out.MLModel + *out = make([]MLModelObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenanceWindowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID 
!= nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.SQLDatabaseManagement != nil { + in, out := &in.SQLDatabaseManagement, &out.SQLDatabaseManagement + *out = new(bool) + **out = **in + } + if in.SQLUserManagement != nil { + in, out := &in.SQLUserManagement, &out.SQLUserManagement + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make([]ShardObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardGroup != nil { + in, out := &in.ShardGroup, &out.ShardGroup + *out = make([]ShardGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]UserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zookeeper != nil { + in, out := &in.Zookeeper, &out.Zookeeper + *out = make([]ZookeeperObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseClusterObservation. +func (in *ClickhouseClusterObservation) DeepCopy() *ClickhouseClusterObservation { + if in == nil { + return nil + } + out := new(ClickhouseClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClickhouseClusterParameters) DeepCopyInto(out *ClickhouseClusterParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]AccessParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BackupRetainPeriodDays != nil { + in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays + *out = new(float64) + **out = **in + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]BackupWindowStartParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Clickhouse != nil { + in, out := &in.Clickhouse, &out.Clickhouse + *out = make([]ClickhouseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = make([]CloudStorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CopySchemaOnNewHosts != nil { + in, out := &in.CopySchemaOnNewHosts, &out.CopySchemaOnNewHosts + *out = new(bool) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]DatabaseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EmbeddedKeeper != nil { + in, out := &in.EmbeddedKeeper, &out.EmbeddedKeeper + *out = new(bool) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FormatSchema != nil { + in, out := &in.FormatSchema, &out.FormatSchema + *out = make([]FormatSchemaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]HostParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MLModel != nil { + in, out := &in.MLModel, &out.MLModel + *out = make([]MLModelParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]MaintenanceWindowParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SQLDatabaseManagement != nil { + in, out := &in.SQLDatabaseManagement, &out.SQLDatabaseManagement + *out = new(bool) + **out = **in + } + if in.SQLUserManagement != nil { + in, out := &in.SQLUserManagement, &out.SQLUserManagement + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make([]ShardParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardGroup != nil { + in, out := &in.ShardGroup, &out.ShardGroup + *out = make([]ShardGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]UserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zookeeper != nil { + in, out := &in.Zookeeper, &out.Zookeeper + *out = make([]ZookeeperParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseClusterParameters. +func (in *ClickhouseClusterParameters) DeepCopy() *ClickhouseClusterParameters { + if in == nil { + return nil + } + out := new(ClickhouseClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseClusterSpec) DeepCopyInto(out *ClickhouseClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseClusterSpec. 
+func (in *ClickhouseClusterSpec) DeepCopy() *ClickhouseClusterSpec { + if in == nil { + return nil + } + out := new(ClickhouseClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseClusterStatus) DeepCopyInto(out *ClickhouseClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseClusterStatus. +func (in *ClickhouseClusterStatus) DeepCopy() *ClickhouseClusterStatus { + if in == nil { + return nil + } + out := new(ClickhouseClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseInitParameters) DeepCopyInto(out *ClickhouseInitParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseInitParameters. +func (in *ClickhouseInitParameters) DeepCopy() *ClickhouseInitParameters { + if in == nil { + return nil + } + out := new(ClickhouseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseObservation) DeepCopyInto(out *ClickhouseObservation) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseObservation. +func (in *ClickhouseObservation) DeepCopy() *ClickhouseObservation { + if in == nil { + return nil + } + out := new(ClickhouseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseParameters) DeepCopyInto(out *ClickhouseParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseParameters. +func (in *ClickhouseParameters) DeepCopy() *ClickhouseParameters { + if in == nil { + return nil + } + out := new(ClickhouseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudStorageInitParameters) DeepCopyInto(out *CloudStorageInitParameters) { + *out = *in + if in.DataCacheEnabled != nil { + in, out := &in.DataCacheEnabled, &out.DataCacheEnabled + *out = new(bool) + **out = **in + } + if in.DataCacheMaxSize != nil { + in, out := &in.DataCacheMaxSize, &out.DataCacheMaxSize + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MoveFactor != nil { + in, out := &in.MoveFactor, &out.MoveFactor + *out = new(float64) + **out = **in + } + if in.PreferNotToMerge != nil { + in, out := &in.PreferNotToMerge, &out.PreferNotToMerge + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStorageInitParameters. +func (in *CloudStorageInitParameters) DeepCopy() *CloudStorageInitParameters { + if in == nil { + return nil + } + out := new(CloudStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudStorageObservation) DeepCopyInto(out *CloudStorageObservation) { + *out = *in + if in.DataCacheEnabled != nil { + in, out := &in.DataCacheEnabled, &out.DataCacheEnabled + *out = new(bool) + **out = **in + } + if in.DataCacheMaxSize != nil { + in, out := &in.DataCacheMaxSize, &out.DataCacheMaxSize + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MoveFactor != nil { + in, out := &in.MoveFactor, &out.MoveFactor + *out = new(float64) + **out = **in + } + if in.PreferNotToMerge != nil { + in, out := &in.PreferNotToMerge, &out.PreferNotToMerge + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStorageObservation. +func (in *CloudStorageObservation) DeepCopy() *CloudStorageObservation { + if in == nil { + return nil + } + out := new(CloudStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudStorageParameters) DeepCopyInto(out *CloudStorageParameters) { + *out = *in + if in.DataCacheEnabled != nil { + in, out := &in.DataCacheEnabled, &out.DataCacheEnabled + *out = new(bool) + **out = **in + } + if in.DataCacheMaxSize != nil { + in, out := &in.DataCacheMaxSize, &out.DataCacheMaxSize + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MoveFactor != nil { + in, out := &in.MoveFactor, &out.MoveFactor + *out = new(float64) + **out = **in + } + if in.PreferNotToMerge != nil { + in, out := &in.PreferNotToMerge, &out.PreferNotToMerge + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStorageParameters. +func (in *CloudStorageParameters) DeepCopy() *CloudStorageParameters { + if in == nil { + return nil + } + out := new(CloudStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigAccessInitParameters) DeepCopyInto(out *ClusterConfigAccessInitParameters) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigAccessInitParameters. +func (in *ClusterConfigAccessInitParameters) DeepCopy() *ClusterConfigAccessInitParameters { + if in == nil { + return nil + } + out := new(ClusterConfigAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigAccessObservation) DeepCopyInto(out *ClusterConfigAccessObservation) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigAccessObservation. +func (in *ClusterConfigAccessObservation) DeepCopy() *ClusterConfigAccessObservation { + if in == nil { + return nil + } + out := new(ClusterConfigAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigAccessParameters) DeepCopyInto(out *ClusterConfigAccessParameters) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigAccessParameters. +func (in *ClusterConfigAccessParameters) DeepCopy() *ClusterConfigAccessParameters { + if in == nil { + return nil + } + out := new(ClusterConfigAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigBackupWindowStartInitParameters) DeepCopyInto(out *ClusterConfigBackupWindowStartInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigBackupWindowStartInitParameters. +func (in *ClusterConfigBackupWindowStartInitParameters) DeepCopy() *ClusterConfigBackupWindowStartInitParameters { + if in == nil { + return nil + } + out := new(ClusterConfigBackupWindowStartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigBackupWindowStartObservation) DeepCopyInto(out *ClusterConfigBackupWindowStartObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigBackupWindowStartObservation. +func (in *ClusterConfigBackupWindowStartObservation) DeepCopy() *ClusterConfigBackupWindowStartObservation { + if in == nil { + return nil + } + out := new(ClusterConfigBackupWindowStartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigBackupWindowStartParameters) DeepCopyInto(out *ClusterConfigBackupWindowStartParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigBackupWindowStartParameters. +func (in *ClusterConfigBackupWindowStartParameters) DeepCopy() *ClusterConfigBackupWindowStartParameters { + if in == nil { + return nil + } + out := new(ClusterConfigBackupWindowStartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigInitParameters) DeepCopyInto(out *ClusterConfigInitParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]ClusterConfigAccessInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupRetainPeriodDays != nil { + in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays + *out = new(float64) + **out = **in + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]ClusterConfigBackupWindowStartInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FeatureCompatibilityVersion != nil { + in, out := &in.FeatureCompatibilityVersion, &out.FeatureCompatibilityVersion + *out = new(string) + **out = **in + } + if in.Mongocfg != nil { + in, out := &in.Mongocfg, &out.Mongocfg + *out = make([]MongocfgInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mongod != nil { + in, out := &in.Mongod, &out.Mongod + *out = make([]MongodInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mongos != nil { + in, out := &in.Mongos, &out.Mongos + *out = make([]MongosInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PerformanceDiagnostics != nil { + in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics + *out = make([]PerformanceDiagnosticsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigInitParameters. 
+func (in *ClusterConfigInitParameters) DeepCopy() *ClusterConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]ClusterConfigAccessObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]ClusterConfigBackupWindowStartObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FeatureCompatibilityVersion != nil {
+		in, out := &in.FeatureCompatibilityVersion, &out.FeatureCompatibilityVersion
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mongocfg != nil {
+		in, out := &in.Mongocfg, &out.Mongocfg
+		*out = make([]MongocfgObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mongod != nil {
+		in, out := &in.Mongod, &out.Mongod
+		*out = make([]MongodObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mongos != nil {
+		in, out := &in.Mongos, &out.Mongos
+		*out = make([]MongosObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]PerformanceDiagnosticsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigObservation.
+func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]ClusterConfigAccessParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]ClusterConfigBackupWindowStartParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FeatureCompatibilityVersion != nil {
+		in, out := &in.FeatureCompatibilityVersion, &out.FeatureCompatibilityVersion
+		*out = new(string)
+		**out = **in
+	}
+	if in.Mongocfg != nil {
+		in, out := &in.Mongocfg, &out.Mongocfg
+		*out = make([]MongocfgParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mongod != nil {
+		in, out := &in.Mongod, &out.Mongod
+		*out = make([]MongodParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mongos != nil {
+		in, out := &in.Mongos, &out.Mongos
+		*out = make([]MongosParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]PerformanceDiagnosticsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigParameters.
+func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CompressionInitParameters) DeepCopyInto(out *CompressionInitParameters) {
+	*out = *in
+	if in.Level != nil {
+		in, out := &in.Level, &out.Level
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Method != nil {
+		in, out := &in.Method, &out.Method
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinPartSize != nil {
+		in, out := &in.MinPartSize, &out.MinPartSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinPartSizeRatio != nil {
+		in, out := &in.MinPartSizeRatio, &out.MinPartSizeRatio
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionInitParameters.
+func (in *CompressionInitParameters) DeepCopy() *CompressionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CompressionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CompressionObservation) DeepCopyInto(out *CompressionObservation) {
+	*out = *in
+	if in.Level != nil {
+		in, out := &in.Level, &out.Level
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Method != nil {
+		in, out := &in.Method, &out.Method
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinPartSize != nil {
+		in, out := &in.MinPartSize, &out.MinPartSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinPartSizeRatio != nil {
+		in, out := &in.MinPartSizeRatio, &out.MinPartSizeRatio
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionObservation.
+func (in *CompressionObservation) DeepCopy() *CompressionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CompressionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CompressionParameters) DeepCopyInto(out *CompressionParameters) {
+	*out = *in
+	if in.Level != nil {
+		in, out := &in.Level, &out.Level
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Method != nil {
+		in, out := &in.Method, &out.Method
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinPartSize != nil {
+		in, out := &in.MinPartSize, &out.MinPartSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinPartSizeRatio != nil {
+		in, out := &in.MinPartSizeRatio, &out.MinPartSizeRatio
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionParameters.
+func (in *CompressionParameters) DeepCopy() *CompressionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CompressionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigAccessInitParameters) DeepCopyInto(out *ConfigAccessInitParameters) {
+	*out = *in
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigAccessInitParameters.
+func (in *ConfigAccessInitParameters) DeepCopy() *ConfigAccessInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigAccessInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigAccessObservation) DeepCopyInto(out *ConfigAccessObservation) {
+	*out = *in
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigAccessObservation.
+func (in *ConfigAccessObservation) DeepCopy() *ConfigAccessObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigAccessObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigAccessParameters) DeepCopyInto(out *ConfigAccessParameters) {
+	*out = *in
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigAccessParameters.
+func (in *ConfigAccessParameters) DeepCopy() *ConfigAccessParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigAccessParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigBackupWindowStartInitParameters) DeepCopyInto(out *ConfigBackupWindowStartInitParameters) {
+	*out = *in
+	if in.Hours != nil {
+		in, out := &in.Hours, &out.Hours
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minutes != nil {
+		in, out := &in.Minutes, &out.Minutes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigBackupWindowStartInitParameters.
+func (in *ConfigBackupWindowStartInitParameters) DeepCopy() *ConfigBackupWindowStartInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigBackupWindowStartInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigBackupWindowStartObservation) DeepCopyInto(out *ConfigBackupWindowStartObservation) {
+	*out = *in
+	if in.Hours != nil {
+		in, out := &in.Hours, &out.Hours
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minutes != nil {
+		in, out := &in.Minutes, &out.Minutes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigBackupWindowStartObservation.
+func (in *ConfigBackupWindowStartObservation) DeepCopy() *ConfigBackupWindowStartObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigBackupWindowStartObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigBackupWindowStartParameters) DeepCopyInto(out *ConfigBackupWindowStartParameters) {
+	*out = *in
+	if in.Hours != nil {
+		in, out := &in.Hours, &out.Hours
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minutes != nil {
+		in, out := &in.Minutes, &out.Minutes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigBackupWindowStartParameters.
+func (in *ConfigBackupWindowStartParameters) DeepCopy() *ConfigBackupWindowStartParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigBackupWindowStartParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigDiskSizeAutoscalingInitParameters) DeepCopyInto(out *ConfigDiskSizeAutoscalingInitParameters) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigDiskSizeAutoscalingInitParameters.
+func (in *ConfigDiskSizeAutoscalingInitParameters) DeepCopy() *ConfigDiskSizeAutoscalingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigDiskSizeAutoscalingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigDiskSizeAutoscalingObservation) DeepCopyInto(out *ConfigDiskSizeAutoscalingObservation) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigDiskSizeAutoscalingObservation.
+func (in *ConfigDiskSizeAutoscalingObservation) DeepCopy() *ConfigDiskSizeAutoscalingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigDiskSizeAutoscalingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigDiskSizeAutoscalingParameters) DeepCopyInto(out *ConfigDiskSizeAutoscalingParameters) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigDiskSizeAutoscalingParameters.
+func (in *ConfigDiskSizeAutoscalingParameters) DeepCopy() *ConfigDiskSizeAutoscalingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigDiskSizeAutoscalingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) {
+	*out = *in
+	if in.AsynchronousInsertLogEnabled != nil {
+		in, out := &in.AsynchronousInsertLogEnabled, &out.AsynchronousInsertLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AsynchronousInsertLogRetentionSize != nil {
+		in, out := &in.AsynchronousInsertLogRetentionSize, &out.AsynchronousInsertLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousInsertLogRetentionTime != nil {
+		in, out := &in.AsynchronousInsertLogRetentionTime, &out.AsynchronousInsertLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogEnabled != nil {
+		in, out := &in.AsynchronousMetricLogEnabled, &out.AsynchronousMetricLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogRetentionSize != nil {
+		in, out := &in.AsynchronousMetricLogRetentionSize, &out.AsynchronousMetricLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogRetentionTime != nil {
+		in, out := &in.AsynchronousMetricLogRetentionTime, &out.AsynchronousMetricLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundBufferFlushSchedulePoolSize != nil {
+		in, out := &in.BackgroundBufferFlushSchedulePoolSize, &out.BackgroundBufferFlushSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundCommonPoolSize != nil {
+		in, out := &in.BackgroundCommonPoolSize, &out.BackgroundCommonPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundDistributedSchedulePoolSize != nil {
+		in, out := &in.BackgroundDistributedSchedulePoolSize, &out.BackgroundDistributedSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundFetchesPoolSize != nil {
+		in, out := &in.BackgroundFetchesPoolSize, &out.BackgroundFetchesPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMergesMutationsConcurrencyRatio != nil {
+		in, out := &in.BackgroundMergesMutationsConcurrencyRatio, &out.BackgroundMergesMutationsConcurrencyRatio
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMessageBrokerSchedulePoolSize != nil {
+		in, out := &in.BackgroundMessageBrokerSchedulePoolSize, &out.BackgroundMessageBrokerSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMovePoolSize != nil {
+		in, out := &in.BackgroundMovePoolSize, &out.BackgroundMovePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundPoolSize != nil {
+		in, out := &in.BackgroundPoolSize, &out.BackgroundPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundSchedulePoolSize != nil {
+		in, out := &in.BackgroundSchedulePoolSize, &out.BackgroundSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Compression != nil {
+		in, out := &in.Compression, &out.Compression
+		*out = make([]CompressionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultDatabase != nil {
+		in, out := &in.DefaultDatabase, &out.DefaultDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.DictionariesLazyLoad != nil {
+		in, out := &in.DictionariesLazyLoad, &out.DictionariesLazyLoad
+		*out = new(bool)
+		**out = **in
+	}
+	if in.GeobaseEnabled != nil {
+		in, out := &in.GeobaseEnabled, &out.GeobaseEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.GeobaseURI != nil {
+		in, out := &in.GeobaseURI, &out.GeobaseURI
+		*out = new(string)
+		**out = **in
+	}
+	if in.GraphiteRollup != nil {
+		in, out := &in.GraphiteRollup, &out.GraphiteRollup
+		*out = make([]GraphiteRollupInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Kafka != nil {
+		in, out := &in.Kafka, &out.Kafka
+		*out = make([]KafkaInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaTopic != nil {
+		in, out := &in.KafkaTopic, &out.KafkaTopic
+		*out = make([]KafkaTopicInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KeepAliveTimeout != nil {
+		in, out := &in.KeepAliveTimeout, &out.KeepAliveTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.LogLevel != nil {
+		in, out := &in.LogLevel, &out.LogLevel
+		*out = new(string)
+		**out = **in
+	}
+	if in.MarkCacheSize != nil {
+		in, out := &in.MarkCacheSize, &out.MarkCacheSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxConcurrentQueries != nil {
+		in, out := &in.MaxConcurrentQueries, &out.MaxConcurrentQueries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxConnections != nil {
+		in, out := &in.MaxConnections, &out.MaxConnections
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxPartitionSizeToDrop != nil {
+		in, out := &in.MaxPartitionSizeToDrop, &out.MaxPartitionSizeToDrop
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxTableSizeToDrop != nil {
+		in, out := &in.MaxTableSizeToDrop, &out.MaxTableSizeToDrop
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeTree != nil {
+		in, out := &in.MergeTree, &out.MergeTree
+		*out = make([]MergeTreeInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MetricLogEnabled != nil {
+		in, out := &in.MetricLogEnabled, &out.MetricLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MetricLogRetentionSize != nil {
+		in, out := &in.MetricLogRetentionSize, &out.MetricLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MetricLogRetentionTime != nil {
+		in, out := &in.MetricLogRetentionTime, &out.MetricLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogEnabled != nil {
+		in, out := &in.OpentelemetrySpanLogEnabled, &out.OpentelemetrySpanLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogRetentionSize != nil {
+		in, out := &in.OpentelemetrySpanLogRetentionSize, &out.OpentelemetrySpanLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogRetentionTime != nil {
+		in, out := &in.OpentelemetrySpanLogRetentionTime, &out.OpentelemetrySpanLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartLogRetentionSize != nil {
+		in, out := &in.PartLogRetentionSize, &out.PartLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartLogRetentionTime != nil {
+		in, out := &in.PartLogRetentionTime, &out.PartLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryCache != nil {
+		in, out := &in.QueryCache, &out.QueryCache
+		*out = make([]QueryCacheInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.QueryLogRetentionSize != nil {
+		in, out := &in.QueryLogRetentionSize, &out.QueryLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryLogRetentionTime != nil {
+		in, out := &in.QueryLogRetentionTime, &out.QueryLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryMaskingRules != nil {
+		in, out := &in.QueryMaskingRules, &out.QueryMaskingRules
+		*out = make([]QueryMaskingRulesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.QueryThreadLogEnabled != nil {
+		in, out := &in.QueryThreadLogEnabled, &out.QueryThreadLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.QueryThreadLogRetentionSize != nil {
+		in, out := &in.QueryThreadLogRetentionSize, &out.QueryThreadLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryThreadLogRetentionTime != nil {
+		in, out := &in.QueryThreadLogRetentionTime, &out.QueryThreadLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryViewsLogEnabled != nil {
+		in, out := &in.QueryViewsLogEnabled, &out.QueryViewsLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.QueryViewsLogRetentionSize != nil {
+		in, out := &in.QueryViewsLogRetentionSize, &out.QueryViewsLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryViewsLogRetentionTime != nil {
+		in, out := &in.QueryViewsLogRetentionTime, &out.QueryViewsLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Rabbitmq != nil {
+		in, out := &in.Rabbitmq, &out.Rabbitmq
+		*out = make([]RabbitmqInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SessionLogEnabled != nil {
+		in, out := &in.SessionLogEnabled, &out.SessionLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionLogRetentionSize != nil {
+		in, out := &in.SessionLogRetentionSize, &out.SessionLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SessionLogRetentionTime != nil {
+		in, out := &in.SessionLogRetentionTime, &out.SessionLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TextLogEnabled != nil {
+		in, out := &in.TextLogEnabled, &out.TextLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TextLogLevel != nil {
+		in, out := &in.TextLogLevel, &out.TextLogLevel
+		*out = new(string)
+		**out = **in
+	}
+	if in.TextLogRetentionSize != nil {
+		in, out := &in.TextLogRetentionSize, &out.TextLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TextLogRetentionTime != nil {
+		in, out := &in.TextLogRetentionTime, &out.TextLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.TotalMemoryProfilerStep != nil {
+		in, out := &in.TotalMemoryProfilerStep, &out.TotalMemoryProfilerStep
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TraceLogEnabled != nil {
+		in, out := &in.TraceLogEnabled, &out.TraceLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TraceLogRetentionSize != nil {
+		in, out := &in.TraceLogRetentionSize, &out.TraceLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TraceLogRetentionTime != nil {
+		in, out := &in.TraceLogRetentionTime, &out.TraceLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UncompressedCacheSize != nil {
+		in, out := &in.UncompressedCacheSize, &out.UncompressedCacheSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ZookeeperLogEnabled != nil {
+		in, out := &in.ZookeeperLogEnabled, &out.ZookeeperLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ZookeeperLogRetentionSize != nil {
+		in, out := &in.ZookeeperLogRetentionSize, &out.ZookeeperLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ZookeeperLogRetentionTime != nil {
+		in, out := &in.ZookeeperLogRetentionTime, &out.ZookeeperLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigInitParameters.
+func (in *ConfigInitParameters) DeepCopy() *ConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigKafkaInitParameters) DeepCopyInto(out *ConfigKafkaInitParameters) {
+	*out = *in
+	if in.KafkaConfig != nil {
+		in, out := &in.KafkaConfig, &out.KafkaConfig
+		*out = make([]KafkaConfigInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]KafkaResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigKafkaInitParameters.
+func (in *ConfigKafkaInitParameters) DeepCopy() *ConfigKafkaInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigKafkaInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigKafkaObservation) DeepCopyInto(out *ConfigKafkaObservation) {
+	*out = *in
+	if in.KafkaConfig != nil {
+		in, out := &in.KafkaConfig, &out.KafkaConfig
+		*out = make([]KafkaConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]KafkaResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigKafkaObservation.
+func (in *ConfigKafkaObservation) DeepCopy() *ConfigKafkaObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigKafkaObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigKafkaParameters) DeepCopyInto(out *ConfigKafkaParameters) {
+	*out = *in
+	if in.KafkaConfig != nil {
+		in, out := &in.KafkaConfig, &out.KafkaConfig
+		*out = make([]KafkaConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]KafkaResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigKafkaParameters.
+func (in *ConfigKafkaParameters) DeepCopy() *ConfigKafkaParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigKafkaParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) {
+	*out = *in
+	if in.AsynchronousInsertLogEnabled != nil {
+		in, out := &in.AsynchronousInsertLogEnabled, &out.AsynchronousInsertLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AsynchronousInsertLogRetentionSize != nil {
+		in, out := &in.AsynchronousInsertLogRetentionSize, &out.AsynchronousInsertLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousInsertLogRetentionTime != nil {
+		in, out := &in.AsynchronousInsertLogRetentionTime, &out.AsynchronousInsertLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogEnabled != nil {
+		in, out := &in.AsynchronousMetricLogEnabled, &out.AsynchronousMetricLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogRetentionSize != nil {
+		in, out := &in.AsynchronousMetricLogRetentionSize, &out.AsynchronousMetricLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogRetentionTime != nil {
+		in, out := &in.AsynchronousMetricLogRetentionTime, &out.AsynchronousMetricLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundBufferFlushSchedulePoolSize != nil {
+		in, out := &in.BackgroundBufferFlushSchedulePoolSize, &out.BackgroundBufferFlushSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundCommonPoolSize != nil {
+		in, out := &in.BackgroundCommonPoolSize, &out.BackgroundCommonPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundDistributedSchedulePoolSize != nil {
+		in, out := &in.BackgroundDistributedSchedulePoolSize, &out.BackgroundDistributedSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundFetchesPoolSize != nil {
+		in, out := &in.BackgroundFetchesPoolSize, &out.BackgroundFetchesPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMergesMutationsConcurrencyRatio != nil {
+		in, out := &in.BackgroundMergesMutationsConcurrencyRatio, &out.BackgroundMergesMutationsConcurrencyRatio
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMessageBrokerSchedulePoolSize != nil {
+		in, out := &in.BackgroundMessageBrokerSchedulePoolSize, &out.BackgroundMessageBrokerSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMovePoolSize != nil {
+		in, out := &in.BackgroundMovePoolSize, &out.BackgroundMovePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundPoolSize != nil {
+		in, out := &in.BackgroundPoolSize, &out.BackgroundPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundSchedulePoolSize != nil {
+		in, out := &in.BackgroundSchedulePoolSize, &out.BackgroundSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Compression != nil {
+		in, out := &in.Compression, &out.Compression
+		*out = make([]CompressionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultDatabase != nil {
+		in, out := &in.DefaultDatabase, &out.DefaultDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.DictionariesLazyLoad != nil {
+		in, out := &in.DictionariesLazyLoad, &out.DictionariesLazyLoad
+		*out = new(bool)
+		**out = **in
+	}
+	if in.GeobaseEnabled != nil {
+		in, out := &in.GeobaseEnabled, &out.GeobaseEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.GeobaseURI != nil {
+		in, out := &in.GeobaseURI, &out.GeobaseURI
+		*out = new(string)
+		**out = **in
+	}
+	if in.GraphiteRollup != nil {
+		in, out := &in.GraphiteRollup, &out.GraphiteRollup
+		*out = make([]GraphiteRollupObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Kafka != nil {
+		in, out := &in.Kafka, &out.Kafka
+		*out = make([]KafkaObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaTopic != nil {
+		in, out := &in.KafkaTopic, &out.KafkaTopic
+		*out = make([]KafkaTopicObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KeepAliveTimeout != nil {
+		in, out := &in.KeepAliveTimeout, &out.KeepAliveTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.LogLevel != nil {
+		in, out := &in.LogLevel, &out.LogLevel
+		*out = new(string)
+		**out = **in
+	}
+	if in.MarkCacheSize != nil {
+		in, out := &in.MarkCacheSize, &out.MarkCacheSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxConcurrentQueries != nil {
+		in, out := &in.MaxConcurrentQueries, &out.MaxConcurrentQueries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxConnections != nil {
+		in, out := &in.MaxConnections, &out.MaxConnections
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxPartitionSizeToDrop != nil {
+		in, out := &in.MaxPartitionSizeToDrop, &out.MaxPartitionSizeToDrop
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxTableSizeToDrop != nil {
+		in, out := &in.MaxTableSizeToDrop, &out.MaxTableSizeToDrop
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeTree != nil {
+		in, out := &in.MergeTree, &out.MergeTree
+		*out = make([]MergeTreeObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MetricLogEnabled != nil {
+		in, out := &in.MetricLogEnabled, &out.MetricLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MetricLogRetentionSize != nil {
+		in, out := &in.MetricLogRetentionSize, &out.MetricLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MetricLogRetentionTime != nil {
+		in, out := &in.MetricLogRetentionTime, &out.MetricLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogEnabled != nil {
+		in, out := &in.OpentelemetrySpanLogEnabled, &out.OpentelemetrySpanLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogRetentionSize != nil {
+		in, out := &in.OpentelemetrySpanLogRetentionSize, &out.OpentelemetrySpanLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogRetentionTime != nil {
+		in, out := &in.OpentelemetrySpanLogRetentionTime, &out.OpentelemetrySpanLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartLogRetentionSize != nil {
+		in, out := &in.PartLogRetentionSize, &out.PartLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartLogRetentionTime != nil {
+		in, out := &in.PartLogRetentionTime, &out.PartLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryCache != nil {
+		in, out := &in.QueryCache, &out.QueryCache
+		*out = make([]QueryCacheObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.QueryLogRetentionSize != nil {
+		in, out := &in.QueryLogRetentionSize, &out.QueryLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryLogRetentionTime != nil {
+		in, out := &in.QueryLogRetentionTime, &out.QueryLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryMaskingRules != nil {
+		in, out := &in.QueryMaskingRules, &out.QueryMaskingRules
+		*out = make([]QueryMaskingRulesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.QueryThreadLogEnabled != nil {
+		in, out := &in.QueryThreadLogEnabled, &out.QueryThreadLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.QueryThreadLogRetentionSize != nil {
+		in, out := &in.QueryThreadLogRetentionSize, &out.QueryThreadLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryThreadLogRetentionTime != nil {
+		in, out := &in.QueryThreadLogRetentionTime, &out.QueryThreadLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryViewsLogEnabled != nil {
+		in, out := &in.QueryViewsLogEnabled, &out.QueryViewsLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.QueryViewsLogRetentionSize != nil {
+		in, out := &in.QueryViewsLogRetentionSize, &out.QueryViewsLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryViewsLogRetentionTime != nil {
+		in, out := &in.QueryViewsLogRetentionTime, &out.QueryViewsLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Rabbitmq != nil {
+		in, out := &in.Rabbitmq, &out.Rabbitmq
+		*out = make([]RabbitmqObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SessionLogEnabled != nil {
+		in, out := &in.SessionLogEnabled, &out.SessionLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionLogRetentionSize != nil {
+		in, out := &in.SessionLogRetentionSize, &out.SessionLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SessionLogRetentionTime != nil {
+		in, out := &in.SessionLogRetentionTime, &out.SessionLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TextLogEnabled != nil {
+		in, out := &in.TextLogEnabled, &out.TextLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TextLogLevel != nil {
+		in, out := &in.TextLogLevel, &out.TextLogLevel
+		*out = new(string)
+		**out = **in
+	}
+	if in.TextLogRetentionSize != nil {
+		in, out := &in.TextLogRetentionSize, &out.TextLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TextLogRetentionTime != nil {
+		in, out := &in.TextLogRetentionTime, &out.TextLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.TotalMemoryProfilerStep != nil {
+		in, out := &in.TotalMemoryProfilerStep, &out.TotalMemoryProfilerStep
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TraceLogEnabled != nil {
+		in, out := &in.TraceLogEnabled, &out.TraceLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TraceLogRetentionSize != nil {
+		in, out := &in.TraceLogRetentionSize, &out.TraceLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TraceLogRetentionTime != nil {
+		in, out := &in.TraceLogRetentionTime, &out.TraceLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UncompressedCacheSize != nil {
+		in, out := &in.UncompressedCacheSize, &out.UncompressedCacheSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ZookeeperLogEnabled != nil {
+		in, out := &in.ZookeeperLogEnabled, &out.ZookeeperLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ZookeeperLogRetentionSize != nil {
+		in, out := &in.ZookeeperLogRetentionSize, &out.ZookeeperLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ZookeeperLogRetentionTime != nil {
+		in, out := &in.ZookeeperLogRetentionTime, &out.ZookeeperLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigObservation.
+func (in *ConfigObservation) DeepCopy() *ConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigParameters) DeepCopyInto(out *ConfigParameters) {
+	*out = *in
+	if in.AsynchronousInsertLogEnabled != nil {
+		in, out := &in.AsynchronousInsertLogEnabled, &out.AsynchronousInsertLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AsynchronousInsertLogRetentionSize != nil {
+		in, out := &in.AsynchronousInsertLogRetentionSize, &out.AsynchronousInsertLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousInsertLogRetentionTime != nil {
+		in, out := &in.AsynchronousInsertLogRetentionTime, &out.AsynchronousInsertLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogEnabled != nil {
+		in, out := &in.AsynchronousMetricLogEnabled, &out.AsynchronousMetricLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogRetentionSize != nil {
+		in, out := &in.AsynchronousMetricLogRetentionSize, &out.AsynchronousMetricLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.AsynchronousMetricLogRetentionTime != nil {
+		in, out := &in.AsynchronousMetricLogRetentionTime, &out.AsynchronousMetricLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundBufferFlushSchedulePoolSize != nil {
+		in, out := &in.BackgroundBufferFlushSchedulePoolSize, &out.BackgroundBufferFlushSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundCommonPoolSize != nil {
+		in, out := &in.BackgroundCommonPoolSize, &out.BackgroundCommonPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundDistributedSchedulePoolSize != nil {
+		in, out := &in.BackgroundDistributedSchedulePoolSize, &out.BackgroundDistributedSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundFetchesPoolSize != nil {
+		in, out := &in.BackgroundFetchesPoolSize, &out.BackgroundFetchesPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMergesMutationsConcurrencyRatio != nil {
+		in, out := &in.BackgroundMergesMutationsConcurrencyRatio, &out.BackgroundMergesMutationsConcurrencyRatio
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMessageBrokerSchedulePoolSize != nil {
+		in, out := &in.BackgroundMessageBrokerSchedulePoolSize, &out.BackgroundMessageBrokerSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundMovePoolSize != nil {
+		in, out := &in.BackgroundMovePoolSize, &out.BackgroundMovePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundPoolSize != nil {
+		in, out := &in.BackgroundPoolSize, &out.BackgroundPoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackgroundSchedulePoolSize != nil {
+		in, out := &in.BackgroundSchedulePoolSize, &out.BackgroundSchedulePoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Compression != nil {
+		in, out := &in.Compression, &out.Compression
+		*out = make([]CompressionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultDatabase != nil {
+		in, out := &in.DefaultDatabase, &out.DefaultDatabase
+		*out = new(string)
+		**out = **in
+	}
+	if in.DictionariesLazyLoad != nil {
+		in, out := &in.DictionariesLazyLoad, &out.DictionariesLazyLoad
+		*out = new(bool)
+		**out = **in
+	}
+	if in.GeobaseEnabled != nil {
+		in, out := &in.GeobaseEnabled, &out.GeobaseEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.GeobaseURI != nil {
+		in, out := &in.GeobaseURI, &out.GeobaseURI
+		*out = new(string)
+		**out = **in
+	}
+	if in.GraphiteRollup != nil {
+		in, out := &in.GraphiteRollup, &out.GraphiteRollup
+		*out = make([]GraphiteRollupParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Kafka != nil {
+		in, out := &in.Kafka, &out.Kafka
+		*out = make([]KafkaParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KafkaTopic != nil {
+		in, out := &in.KafkaTopic, &out.KafkaTopic
+		*out = make([]KafkaTopicParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.KeepAliveTimeout != nil {
+		in, out := &in.KeepAliveTimeout, &out.KeepAliveTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.LogLevel != nil {
+		in, out := &in.LogLevel, &out.LogLevel
+		*out = new(string)
+		**out = **in
+	}
+	if in.MarkCacheSize != nil {
+		in, out := &in.MarkCacheSize, &out.MarkCacheSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxConcurrentQueries != nil {
+		in, out := &in.MaxConcurrentQueries, &out.MaxConcurrentQueries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxConnections != nil {
+		in, out := &in.MaxConnections, &out.MaxConnections
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxPartitionSizeToDrop != nil {
+		in, out := &in.MaxPartitionSizeToDrop, &out.MaxPartitionSizeToDrop
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxTableSizeToDrop != nil {
+		in, out := &in.MaxTableSizeToDrop, &out.MaxTableSizeToDrop
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeTree != nil {
+		in, out := &in.MergeTree, &out.MergeTree
+		*out = make([]MergeTreeParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MetricLogEnabled != nil {
+		in, out := &in.MetricLogEnabled, &out.MetricLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MetricLogRetentionSize != nil {
+		in, out := &in.MetricLogRetentionSize, &out.MetricLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MetricLogRetentionTime != nil {
+		in, out := &in.MetricLogRetentionTime, &out.MetricLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogEnabled != nil {
+		in, out := &in.OpentelemetrySpanLogEnabled, &out.OpentelemetrySpanLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogRetentionSize != nil {
+		in, out := &in.OpentelemetrySpanLogRetentionSize, &out.OpentelemetrySpanLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.OpentelemetrySpanLogRetentionTime != nil {
+		in, out := &in.OpentelemetrySpanLogRetentionTime, &out.OpentelemetrySpanLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartLogRetentionSize != nil {
+		in, out := &in.PartLogRetentionSize, &out.PartLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartLogRetentionTime != nil {
+		in, out := &in.PartLogRetentionTime, &out.PartLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryCache != nil {
+		in, out := &in.QueryCache, &out.QueryCache
+		*out = make([]QueryCacheParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.QueryLogRetentionSize != nil {
+		in, out := &in.QueryLogRetentionSize, &out.QueryLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryLogRetentionTime != nil {
+		in, out := &in.QueryLogRetentionTime, &out.QueryLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryMaskingRules != nil {
+		in, out := &in.QueryMaskingRules, &out.QueryMaskingRules
+		*out = make([]QueryMaskingRulesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.QueryThreadLogEnabled != nil {
+		in, out := &in.QueryThreadLogEnabled, &out.QueryThreadLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.QueryThreadLogRetentionSize != nil {
+		in, out := &in.QueryThreadLogRetentionSize, &out.QueryThreadLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryThreadLogRetentionTime != nil {
+		in, out := &in.QueryThreadLogRetentionTime, &out.QueryThreadLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryViewsLogEnabled != nil {
+		in, out := &in.QueryViewsLogEnabled, &out.QueryViewsLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.QueryViewsLogRetentionSize != nil {
+		in, out := &in.QueryViewsLogRetentionSize, &out.QueryViewsLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.QueryViewsLogRetentionTime != nil {
+		in, out := &in.QueryViewsLogRetentionTime, &out.QueryViewsLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Rabbitmq != nil {
+		in, out := &in.Rabbitmq, &out.Rabbitmq
+		*out = make([]RabbitmqParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SessionLogEnabled != nil {
+		in, out := &in.SessionLogEnabled, &out.SessionLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionLogRetentionSize != nil {
+		in, out := &in.SessionLogRetentionSize, &out.SessionLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SessionLogRetentionTime != nil {
+		in, out := &in.SessionLogRetentionTime, &out.SessionLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TextLogEnabled != nil {
+		in, out := &in.TextLogEnabled, &out.TextLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TextLogLevel != nil {
+		in, out := &in.TextLogLevel, &out.TextLogLevel
+		*out = new(string)
+		**out = **in
+	}
+	if in.TextLogRetentionSize != nil {
+		in, out := &in.TextLogRetentionSize, &out.TextLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TextLogRetentionTime != nil {
+		in, out := &in.TextLogRetentionTime, &out.TextLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Timezone != nil {
+		in, out := &in.Timezone, &out.Timezone
+		*out = new(string)
+		**out = **in
+	}
+	if in.TotalMemoryProfilerStep != nil {
+		in, out := &in.TotalMemoryProfilerStep, &out.TotalMemoryProfilerStep
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TraceLogEnabled != nil {
+		in, out := &in.TraceLogEnabled, &out.TraceLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TraceLogRetentionSize != nil {
+		in, out := &in.TraceLogRetentionSize, &out.TraceLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TraceLogRetentionTime != nil {
+		in, out := &in.TraceLogRetentionTime, &out.TraceLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UncompressedCacheSize != nil {
+		in, out := &in.UncompressedCacheSize, &out.UncompressedCacheSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ZookeeperLogEnabled != nil {
+		in, out := &in.ZookeeperLogEnabled, &out.ZookeeperLogEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ZookeeperLogRetentionSize != nil {
+		in, out := &in.ZookeeperLogRetentionSize, &out.ZookeeperLogRetentionSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ZookeeperLogRetentionTime != nil {
+		in, out := &in.ZookeeperLogRetentionTime, &out.ZookeeperLogRetentionTime
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigParameters.
+func (in *ConfigParameters) DeepCopy() *ConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigPerformanceDiagnosticsInitParameters) DeepCopyInto(out *ConfigPerformanceDiagnosticsInitParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionsSamplingInterval != nil {
+		in, out := &in.SessionsSamplingInterval, &out.SessionsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatementsSamplingInterval != nil {
+		in, out := &in.StatementsSamplingInterval, &out.StatementsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPerformanceDiagnosticsInitParameters.
+func (in *ConfigPerformanceDiagnosticsInitParameters) DeepCopy() *ConfigPerformanceDiagnosticsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigPerformanceDiagnosticsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigPerformanceDiagnosticsObservation) DeepCopyInto(out *ConfigPerformanceDiagnosticsObservation) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionsSamplingInterval != nil {
+		in, out := &in.SessionsSamplingInterval, &out.SessionsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatementsSamplingInterval != nil {
+		in, out := &in.StatementsSamplingInterval, &out.StatementsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPerformanceDiagnosticsObservation.
+func (in *ConfigPerformanceDiagnosticsObservation) DeepCopy() *ConfigPerformanceDiagnosticsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigPerformanceDiagnosticsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigPerformanceDiagnosticsParameters) DeepCopyInto(out *ConfigPerformanceDiagnosticsParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionsSamplingInterval != nil {
+		in, out := &in.SessionsSamplingInterval, &out.SessionsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatementsSamplingInterval != nil {
+		in, out := &in.StatementsSamplingInterval, &out.StatementsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPerformanceDiagnosticsParameters.
+func (in *ConfigPerformanceDiagnosticsParameters) DeepCopy() *ConfigPerformanceDiagnosticsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigPerformanceDiagnosticsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigPoolerConfigInitParameters) DeepCopyInto(out *ConfigPoolerConfigInitParameters) {
+	*out = *in
+	if in.PoolDiscard != nil {
+		in, out := &in.PoolDiscard, &out.PoolDiscard
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PoolingMode != nil {
+		in, out := &in.PoolingMode, &out.PoolingMode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPoolerConfigInitParameters.
+func (in *ConfigPoolerConfigInitParameters) DeepCopy() *ConfigPoolerConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigPoolerConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigPoolerConfigObservation) DeepCopyInto(out *ConfigPoolerConfigObservation) {
+	*out = *in
+	if in.PoolDiscard != nil {
+		in, out := &in.PoolDiscard, &out.PoolDiscard
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PoolingMode != nil {
+		in, out := &in.PoolingMode, &out.PoolingMode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPoolerConfigObservation.
+func (in *ConfigPoolerConfigObservation) DeepCopy() *ConfigPoolerConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigPoolerConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigPoolerConfigParameters) DeepCopyInto(out *ConfigPoolerConfigParameters) {
+	*out = *in
+	if in.PoolDiscard != nil {
+		in, out := &in.PoolDiscard, &out.PoolDiscard
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PoolingMode != nil {
+		in, out := &in.PoolingMode, &out.PoolingMode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPoolerConfigParameters.
+func (in *ConfigPoolerConfigParameters) DeepCopy() *ConfigPoolerConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigPoolerConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigResourcesInitParameters) DeepCopyInto(out *ConfigResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourcesInitParameters.
+func (in *ConfigResourcesInitParameters) DeepCopy() *ConfigResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigResourcesObservation) DeepCopyInto(out *ConfigResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourcesObservation.
+func (in *ConfigResourcesObservation) DeepCopy() *ConfigResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigResourcesParameters) DeepCopyInto(out *ConfigResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourcesParameters.
+func (in *ConfigResourcesParameters) DeepCopy() *ConfigResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigZookeeperInitParameters) DeepCopyInto(out *ConfigZookeeperInitParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ConfigZookeeperResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigZookeeperInitParameters.
+func (in *ConfigZookeeperInitParameters) DeepCopy() *ConfigZookeeperInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigZookeeperInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigZookeeperObservation) DeepCopyInto(out *ConfigZookeeperObservation) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ConfigZookeeperResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigZookeeperObservation.
+func (in *ConfigZookeeperObservation) DeepCopy() *ConfigZookeeperObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigZookeeperObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigZookeeperParameters) DeepCopyInto(out *ConfigZookeeperParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ConfigZookeeperResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigZookeeperParameters.
+func (in *ConfigZookeeperParameters) DeepCopy() *ConfigZookeeperParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigZookeeperParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigZookeeperResourcesInitParameters) DeepCopyInto(out *ConfigZookeeperResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigZookeeperResourcesInitParameters.
+func (in *ConfigZookeeperResourcesInitParameters) DeepCopy() *ConfigZookeeperResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigZookeeperResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigZookeeperResourcesObservation) DeepCopyInto(out *ConfigZookeeperResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigZookeeperResourcesObservation.
+func (in *ConfigZookeeperResourcesObservation) DeepCopy() *ConfigZookeeperResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigZookeeperResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigZookeeperResourcesParameters) DeepCopyInto(out *ConfigZookeeperResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigZookeeperResourcesParameters.
+func (in *ConfigZookeeperResourcesParameters) DeepCopy() *ConfigZookeeperResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigZookeeperResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionLimitsInitParameters) DeepCopyInto(out *ConnectionLimitsInitParameters) {
+	*out = *in
+	if in.MaxConnectionsPerHour != nil {
+		in, out := &in.MaxConnectionsPerHour, &out.MaxConnectionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxQuestionsPerHour != nil {
+		in, out := &in.MaxQuestionsPerHour, &out.MaxQuestionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUpdatesPerHour != nil {
+		in, out := &in.MaxUpdatesPerHour, &out.MaxUpdatesPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUserConnections != nil {
+		in, out := &in.MaxUserConnections, &out.MaxUserConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionLimitsInitParameters.
+func (in *ConnectionLimitsInitParameters) DeepCopy() *ConnectionLimitsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConnectionLimitsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionLimitsObservation) DeepCopyInto(out *ConnectionLimitsObservation) {
+	*out = *in
+	if in.MaxConnectionsPerHour != nil {
+		in, out := &in.MaxConnectionsPerHour, &out.MaxConnectionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxQuestionsPerHour != nil {
+		in, out := &in.MaxQuestionsPerHour, &out.MaxQuestionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUpdatesPerHour != nil {
+		in, out := &in.MaxUpdatesPerHour, &out.MaxUpdatesPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUserConnections != nil {
+		in, out := &in.MaxUserConnections, &out.MaxUserConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionLimitsObservation.
+func (in *ConnectionLimitsObservation) DeepCopy() *ConnectionLimitsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConnectionLimitsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionLimitsParameters) DeepCopyInto(out *ConnectionLimitsParameters) {
+	*out = *in
+	if in.MaxConnectionsPerHour != nil {
+		in, out := &in.MaxConnectionsPerHour, &out.MaxConnectionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxQuestionsPerHour != nil {
+		in, out := &in.MaxQuestionsPerHour, &out.MaxQuestionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUpdatesPerHour != nil {
+		in, out := &in.MaxUpdatesPerHour, &out.MaxUpdatesPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUserConnections != nil {
+		in, out := &in.MaxUserConnections, &out.MaxUserConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionLimitsParameters.
+func (in *ConnectionLimitsParameters) DeepCopy() *ConnectionLimitsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConnectionLimitsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
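+//
+// Note (illustrative, not generated output): every DeepCopyInto in this file
+// follows the controller-gen pattern for pointer fields — allocate a fresh
+// new(T), then copy the pointed-to value — so a copy never aliases its source.
+// A minimal usage sketch; the ptr helper and variable names are hypothetical,
+// not part of this package:
+//
+//	orig := &ConnectionLimitsParameters{MaxUserConnections: ptr(10.0)}
+//	cp := orig.DeepCopy()
+//	*cp.MaxUserConnections = 20 // leaves *orig.MaxUserConnections at 10
+//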
+func (in *ConnectorConfigMirrormakerInitParameters) DeepCopyInto(out *ConnectorConfigMirrormakerInitParameters) { + *out = *in + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.SourceCluster != nil { + in, out := &in.SourceCluster, &out.SourceCluster + *out = make([]SourceClusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetCluster != nil { + in, out := &in.TargetCluster, &out.TargetCluster + *out = make([]TargetClusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorConfigMirrormakerInitParameters. +func (in *ConnectorConfigMirrormakerInitParameters) DeepCopy() *ConnectorConfigMirrormakerInitParameters { + if in == nil { + return nil + } + out := new(ConnectorConfigMirrormakerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorConfigMirrormakerObservation) DeepCopyInto(out *ConnectorConfigMirrormakerObservation) { + *out = *in + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.SourceCluster != nil { + in, out := &in.SourceCluster, &out.SourceCluster + *out = make([]SourceClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetCluster != nil { + in, out := &in.TargetCluster, &out.TargetCluster + *out = make([]TargetClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorConfigMirrormakerObservation. +func (in *ConnectorConfigMirrormakerObservation) DeepCopy() *ConnectorConfigMirrormakerObservation { + if in == nil { + return nil + } + out := new(ConnectorConfigMirrormakerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorConfigMirrormakerParameters) DeepCopyInto(out *ConnectorConfigMirrormakerParameters) { + *out = *in + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.SourceCluster != nil { + in, out := &in.SourceCluster, &out.SourceCluster + *out = make([]SourceClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetCluster != nil { + in, out := &in.TargetCluster, &out.TargetCluster + *out = make([]TargetClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorConfigMirrormakerParameters. 
+func (in *ConnectorConfigMirrormakerParameters) DeepCopy() *ConnectorConfigMirrormakerParameters { + if in == nil { + return nil + } + out := new(ConnectorConfigMirrormakerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorConfigS3SinkInitParameters) DeepCopyInto(out *ConnectorConfigS3SinkInitParameters) { + *out = *in + if in.FileCompressionType != nil { + in, out := &in.FileCompressionType, &out.FileCompressionType + *out = new(string) + **out = **in + } + if in.FileMaxRecords != nil { + in, out := &in.FileMaxRecords, &out.FileMaxRecords + *out = new(float64) + **out = **in + } + if in.S3Connection != nil { + in, out := &in.S3Connection, &out.S3Connection + *out = make([]S3ConnectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorConfigS3SinkInitParameters. +func (in *ConnectorConfigS3SinkInitParameters) DeepCopy() *ConnectorConfigS3SinkInitParameters { + if in == nil { + return nil + } + out := new(ConnectorConfigS3SinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorConfigS3SinkObservation) DeepCopyInto(out *ConnectorConfigS3SinkObservation) { + *out = *in + if in.FileCompressionType != nil { + in, out := &in.FileCompressionType, &out.FileCompressionType + *out = new(string) + **out = **in + } + if in.FileMaxRecords != nil { + in, out := &in.FileMaxRecords, &out.FileMaxRecords + *out = new(float64) + **out = **in + } + if in.S3Connection != nil { + in, out := &in.S3Connection, &out.S3Connection + *out = make([]S3ConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorConfigS3SinkObservation. +func (in *ConnectorConfigS3SinkObservation) DeepCopy() *ConnectorConfigS3SinkObservation { + if in == nil { + return nil + } + out := new(ConnectorConfigS3SinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorConfigS3SinkParameters) DeepCopyInto(out *ConnectorConfigS3SinkParameters) { + *out = *in + if in.FileCompressionType != nil { + in, out := &in.FileCompressionType, &out.FileCompressionType + *out = new(string) + **out = **in + } + if in.FileMaxRecords != nil { + in, out := &in.FileMaxRecords, &out.FileMaxRecords + *out = new(float64) + **out = **in + } + if in.S3Connection != nil { + in, out := &in.S3Connection, &out.S3Connection + *out = make([]S3ConnectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorConfigS3SinkParameters. 
+func (in *ConnectorConfigS3SinkParameters) DeepCopy() *ConnectorConfigS3SinkParameters { + if in == nil { + return nil + } + out := new(ConnectorConfigS3SinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataNodeInitParameters) DeepCopyInto(out *DataNodeInitParameters) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]DataNodeResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataNodeInitParameters. +func (in *DataNodeInitParameters) DeepCopy() *DataNodeInitParameters { + if in == nil { + return nil + } + out := new(DataNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataNodeObservation) DeepCopyInto(out *DataNodeObservation) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]DataNodeResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataNodeObservation. +func (in *DataNodeObservation) DeepCopy() *DataNodeObservation { + if in == nil { + return nil + } + out := new(DataNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataNodeParameters) DeepCopyInto(out *DataNodeParameters) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]DataNodeResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataNodeParameters. +func (in *DataNodeParameters) DeepCopy() *DataNodeParameters { + if in == nil { + return nil + } + out := new(DataNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataNodeResourcesInitParameters) DeepCopyInto(out *DataNodeResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataNodeResourcesInitParameters. +func (in *DataNodeResourcesInitParameters) DeepCopy() *DataNodeResourcesInitParameters { + if in == nil { + return nil + } + out := new(DataNodeResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataNodeResourcesObservation) DeepCopyInto(out *DataNodeResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataNodeResourcesObservation.
+func (in *DataNodeResourcesObservation) DeepCopy() *DataNodeResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DataNodeResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataNodeResourcesParameters) DeepCopyInto(out *DataNodeResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataNodeResourcesParameters.
+func (in *DataNodeResourcesParameters) DeepCopy() *DataNodeResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DataNodeResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseInitParameters) DeepCopyInto(out *DatabaseInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInitParameters.
+func (in *DatabaseInitParameters) DeepCopy() *DatabaseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseObservation) DeepCopyInto(out *DatabaseObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObservation.
+func (in *DatabaseObservation) DeepCopy() *DatabaseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseParameters) DeepCopyInto(out *DatabaseParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseParameters.
+func (in *DatabaseParameters) DeepCopy() *DatabaseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskSizeAutoscalingInitParameters) DeepCopyInto(out *DiskSizeAutoscalingInitParameters) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingInitParameters.
+func (in *DiskSizeAutoscalingInitParameters) DeepCopy() *DiskSizeAutoscalingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskSizeAutoscalingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskSizeAutoscalingMongocfgInitParameters) DeepCopyInto(out *DiskSizeAutoscalingMongocfgInitParameters) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongocfgInitParameters.
+func (in *DiskSizeAutoscalingMongocfgInitParameters) DeepCopy() *DiskSizeAutoscalingMongocfgInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskSizeAutoscalingMongocfgInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskSizeAutoscalingMongocfgObservation) DeepCopyInto(out *DiskSizeAutoscalingMongocfgObservation) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongocfgObservation.
+func (in *DiskSizeAutoscalingMongocfgObservation) DeepCopy() *DiskSizeAutoscalingMongocfgObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(DiskSizeAutoscalingMongocfgObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
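+//
+// Note (illustrative, not generated output): numeric fields such as
+// DiskSizeLimit and the usage thresholds are *float64 rather than integers
+// because upjet appears to decode Terraform numbers as JSON numbers; a nil
+// pointer means "unset", which is why each branch above nil-checks before
+// allocating and copying the value.
+//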
+func (in *DiskSizeAutoscalingMongocfgParameters) DeepCopyInto(out *DiskSizeAutoscalingMongocfgParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongocfgParameters. +func (in *DiskSizeAutoscalingMongocfgParameters) DeepCopy() *DiskSizeAutoscalingMongocfgParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongocfgParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingMongodInitParameters) DeepCopyInto(out *DiskSizeAutoscalingMongodInitParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongodInitParameters. +func (in *DiskSizeAutoscalingMongodInitParameters) DeepCopy() *DiskSizeAutoscalingMongodInitParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingMongodObservation) DeepCopyInto(out *DiskSizeAutoscalingMongodObservation) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongodObservation. +func (in *DiskSizeAutoscalingMongodObservation) DeepCopy() *DiskSizeAutoscalingMongodObservation { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskSizeAutoscalingMongodParameters) DeepCopyInto(out *DiskSizeAutoscalingMongodParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongodParameters. +func (in *DiskSizeAutoscalingMongodParameters) DeepCopy() *DiskSizeAutoscalingMongodParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingMongoinfraInitParameters) DeepCopyInto(out *DiskSizeAutoscalingMongoinfraInitParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongoinfraInitParameters. +func (in *DiskSizeAutoscalingMongoinfraInitParameters) DeepCopy() *DiskSizeAutoscalingMongoinfraInitParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongoinfraInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingMongoinfraObservation) DeepCopyInto(out *DiskSizeAutoscalingMongoinfraObservation) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongoinfraObservation. +func (in *DiskSizeAutoscalingMongoinfraObservation) DeepCopy() *DiskSizeAutoscalingMongoinfraObservation { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongoinfraObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskSizeAutoscalingMongoinfraParameters) DeepCopyInto(out *DiskSizeAutoscalingMongoinfraParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongoinfraParameters. +func (in *DiskSizeAutoscalingMongoinfraParameters) DeepCopy() *DiskSizeAutoscalingMongoinfraParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongoinfraParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingMongosInitParameters) DeepCopyInto(out *DiskSizeAutoscalingMongosInitParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongosInitParameters. +func (in *DiskSizeAutoscalingMongosInitParameters) DeepCopy() *DiskSizeAutoscalingMongosInitParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongosInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingMongosObservation) DeepCopyInto(out *DiskSizeAutoscalingMongosObservation) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongosObservation. +func (in *DiskSizeAutoscalingMongosObservation) DeepCopy() *DiskSizeAutoscalingMongosObservation { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongosObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskSizeAutoscalingMongosParameters) DeepCopyInto(out *DiskSizeAutoscalingMongosParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingMongosParameters. +func (in *DiskSizeAutoscalingMongosParameters) DeepCopy() *DiskSizeAutoscalingMongosParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingMongosParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingObservation) DeepCopyInto(out *DiskSizeAutoscalingObservation) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingObservation. +func (in *DiskSizeAutoscalingObservation) DeepCopy() *DiskSizeAutoscalingObservation { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSizeAutoscalingParameters) DeepCopyInto(out *DiskSizeAutoscalingParameters) { + *out = *in + if in.DiskSizeLimit != nil { + in, out := &in.DiskSizeLimit, &out.DiskSizeLimit + *out = new(float64) + **out = **in + } + if in.EmergencyUsageThreshold != nil { + in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold + *out = new(float64) + **out = **in + } + if in.PlannedUsageThreshold != nil { + in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSizeAutoscalingParameters. +func (in *DiskSizeAutoscalingParameters) DeepCopy() *DiskSizeAutoscalingParameters { + if in == nil { + return nil + } + out := new(DiskSizeAutoscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchCluster) DeepCopyInto(out *ElasticsearchCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchCluster. 
+func (in *ElasticsearchCluster) DeepCopy() *ElasticsearchCluster { + if in == nil { + return nil + } + out := new(ElasticsearchCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ElasticsearchCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterConfigInitParameters) DeepCopyInto(out *ElasticsearchClusterConfigInitParameters) { + *out = *in + out.AdminPasswordSecretRef = in.AdminPasswordSecretRef + if in.DataNode != nil { + in, out := &in.DataNode, &out.DataNode + *out = make([]DataNodeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.MasterNode != nil { + in, out := &in.MasterNode, &out.MasterNode + *out = make([]MasterNodeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterConfigInitParameters. +func (in *ElasticsearchClusterConfigInitParameters) DeepCopy() *ElasticsearchClusterConfigInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchClusterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterConfigObservation) DeepCopyInto(out *ElasticsearchClusterConfigObservation) { + *out = *in + if in.DataNode != nil { + in, out := &in.DataNode, &out.DataNode + *out = make([]DataNodeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.MasterNode != nil { + in, out := &in.MasterNode, &out.MasterNode + *out = make([]MasterNodeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterConfigObservation. +func (in *ElasticsearchClusterConfigObservation) DeepCopy() *ElasticsearchClusterConfigObservation { + if in == nil { + return nil + } + out := new(ElasticsearchClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
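+//
+// Note (illustrative, not generated output): the DeepCopyObject methods on
+// ElasticsearchCluster and ElasticsearchClusterList are what let these types
+// satisfy k8s.io/apimachinery/pkg/runtime.Object, so they can flow through
+// controller-runtime clients and caches. A sketch with a hypothetical
+// cluster variable:
+//
+//	var obj runtime.Object = cluster.DeepCopyObject()
+//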
+func (in *ElasticsearchClusterConfigParameters) DeepCopyInto(out *ElasticsearchClusterConfigParameters) { + *out = *in + out.AdminPasswordSecretRef = in.AdminPasswordSecretRef + if in.DataNode != nil { + in, out := &in.DataNode, &out.DataNode + *out = make([]DataNodeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.MasterNode != nil { + in, out := &in.MasterNode, &out.MasterNode + *out = make([]MasterNodeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterConfigParameters. +func (in *ElasticsearchClusterConfigParameters) DeepCopy() *ElasticsearchClusterConfigParameters { + if in == nil { + return nil + } + out := new(ElasticsearchClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterHostInitParameters) DeepCopyInto(out *ElasticsearchClusterHostInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterHostInitParameters. +func (in *ElasticsearchClusterHostInitParameters) DeepCopy() *ElasticsearchClusterHostInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchClusterHostInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchClusterHostObservation) DeepCopyInto(out *ElasticsearchClusterHostObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterHostObservation. +func (in *ElasticsearchClusterHostObservation) DeepCopy() *ElasticsearchClusterHostObservation { + if in == nil { + return nil + } + out := new(ElasticsearchClusterHostObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterHostParameters) DeepCopyInto(out *ElasticsearchClusterHostParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterHostParameters. +func (in *ElasticsearchClusterHostParameters) DeepCopy() *ElasticsearchClusterHostParameters { + if in == nil { + return nil + } + out := new(ElasticsearchClusterHostParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchClusterInitParameters) DeepCopyInto(out *ElasticsearchClusterInitParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ElasticsearchClusterConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]ElasticsearchClusterHostInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]ElasticsearchClusterMaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterInitParameters. 
+func (in *ElasticsearchClusterInitParameters) DeepCopy() *ElasticsearchClusterInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterList) DeepCopyInto(out *ElasticsearchClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ElasticsearchCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterList. +func (in *ElasticsearchClusterList) DeepCopy() *ElasticsearchClusterList { + if in == nil { + return nil + } + out := new(ElasticsearchClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ElasticsearchClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterMaintenanceWindowInitParameters) DeepCopyInto(out *ElasticsearchClusterMaintenanceWindowInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterMaintenanceWindowInitParameters. +func (in *ElasticsearchClusterMaintenanceWindowInitParameters) DeepCopy() *ElasticsearchClusterMaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchClusterMaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchClusterMaintenanceWindowObservation) DeepCopyInto(out *ElasticsearchClusterMaintenanceWindowObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterMaintenanceWindowObservation. +func (in *ElasticsearchClusterMaintenanceWindowObservation) DeepCopy() *ElasticsearchClusterMaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(ElasticsearchClusterMaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchClusterMaintenanceWindowParameters) DeepCopyInto(out *ElasticsearchClusterMaintenanceWindowParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterMaintenanceWindowParameters.
+func (in *ElasticsearchClusterMaintenanceWindowParameters) DeepCopy() *ElasticsearchClusterMaintenanceWindowParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ElasticsearchClusterMaintenanceWindowParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ElasticsearchClusterObservation) DeepCopyInto(out *ElasticsearchClusterObservation) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]ElasticsearchClusterConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]ElasticsearchClusterHostObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]ElasticsearchClusterMaintenanceWindowObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterObservation.
+func (in *ElasticsearchClusterObservation) DeepCopy() *ElasticsearchClusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ElasticsearchClusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ElasticsearchClusterParameters) DeepCopyInto(out *ElasticsearchClusterParameters) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]ElasticsearchClusterConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]ElasticsearchClusterHostParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]ElasticsearchClusterMaintenanceWindowParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountID != nil {
+		in, out := &in.ServiceAccountID, &out.ServiceAccountID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServiceAccountIDRef != nil {
+		in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountIDSelector != nil {
+		in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterParameters.
+func (in *ElasticsearchClusterParameters) DeepCopy() *ElasticsearchClusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ElasticsearchClusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ElasticsearchClusterSpec) DeepCopyInto(out *ElasticsearchClusterSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterSpec.
+func (in *ElasticsearchClusterSpec) DeepCopy() *ElasticsearchClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ElasticsearchClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ElasticsearchClusterStatus) DeepCopyInto(out *ElasticsearchClusterStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterStatus.
+func (in *ElasticsearchClusterStatus) DeepCopy() *ElasticsearchClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ElasticsearchClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionInitParameters) DeepCopyInto(out *ExtensionInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionInitParameters.
+func (in *ExtensionInitParameters) DeepCopy() *ExtensionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtensionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionObservation) DeepCopyInto(out *ExtensionObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionObservation.
+func (in *ExtensionObservation) DeepCopy() *ExtensionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtensionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionParameters) DeepCopyInto(out *ExtensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionParameters. +func (in *ExtensionParameters) DeepCopy() *ExtensionParameters { + if in == nil { + return nil + } + out := new(ExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalClusterInitParameters) DeepCopyInto(out *ExternalClusterInitParameters) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalClusterInitParameters. +func (in *ExternalClusterInitParameters) DeepCopy() *ExternalClusterInitParameters { + if in == nil { + return nil + } + out := new(ExternalClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalClusterObservation) DeepCopyInto(out *ExternalClusterObservation) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalClusterObservation. +func (in *ExternalClusterObservation) DeepCopy() *ExternalClusterObservation { + if in == nil { + return nil + } + out := new(ExternalClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalClusterParameters) DeepCopyInto(out *ExternalClusterParameters) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalClusterParameters. +func (in *ExternalClusterParameters) DeepCopy() *ExternalClusterParameters { + if in == nil { + return nil + } + out := new(ExternalClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalS3InitParameters) DeepCopyInto(out *ExternalS3InitParameters) { + *out = *in + if in.AccessKeyID != nil { + in, out := &in.AccessKeyID, &out.AccessKeyID + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecretAccessKeySecretRef != nil { + in, out := &in.SecretAccessKeySecretRef, &out.SecretAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalS3InitParameters. +func (in *ExternalS3InitParameters) DeepCopy() *ExternalS3InitParameters { + if in == nil { + return nil + } + out := new(ExternalS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalS3Observation) DeepCopyInto(out *ExternalS3Observation) { + *out = *in + if in.AccessKeyID != nil { + in, out := &in.AccessKeyID, &out.AccessKeyID + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalS3Observation. +func (in *ExternalS3Observation) DeepCopy() *ExternalS3Observation { + if in == nil { + return nil + } + out := new(ExternalS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalS3Parameters) DeepCopyInto(out *ExternalS3Parameters) { + *out = *in + if in.AccessKeyID != nil { + in, out := &in.AccessKeyID, &out.AccessKeyID + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecretAccessKeySecretRef != nil { + in, out := &in.SecretAccessKeySecretRef, &out.SecretAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalS3Parameters. +func (in *ExternalS3Parameters) DeepCopy() *ExternalS3Parameters { + if in == nil { + return nil + } + out := new(ExternalS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatSchemaInitParameters) DeepCopyInto(out *FormatSchemaInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatSchemaInitParameters. +func (in *FormatSchemaInitParameters) DeepCopy() *FormatSchemaInitParameters { + if in == nil { + return nil + } + out := new(FormatSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatSchemaObservation) DeepCopyInto(out *FormatSchemaObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatSchemaObservation. +func (in *FormatSchemaObservation) DeepCopy() *FormatSchemaObservation { + if in == nil { + return nil + } + out := new(FormatSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatSchemaParameters) DeepCopyInto(out *FormatSchemaParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatSchemaParameters. +func (in *FormatSchemaParameters) DeepCopy() *FormatSchemaParameters { + if in == nil { + return nil + } + out := new(FormatSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphiteRollupInitParameters) DeepCopyInto(out *GraphiteRollupInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathColumnName != nil { + in, out := &in.PathColumnName, &out.PathColumnName + *out = new(string) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = make([]PatternInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeColumnName != nil { + in, out := &in.TimeColumnName, &out.TimeColumnName + *out = new(string) + **out = **in + } + if in.ValueColumnName != nil { + in, out := &in.ValueColumnName, &out.ValueColumnName + *out = new(string) + **out = **in + } + if in.VersionColumnName != nil { + in, out := &in.VersionColumnName, &out.VersionColumnName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphiteRollupInitParameters. +func (in *GraphiteRollupInitParameters) DeepCopy() *GraphiteRollupInitParameters { + if in == nil { + return nil + } + out := new(GraphiteRollupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphiteRollupObservation) DeepCopyInto(out *GraphiteRollupObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathColumnName != nil { + in, out := &in.PathColumnName, &out.PathColumnName + *out = new(string) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = make([]PatternObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeColumnName != nil { + in, out := &in.TimeColumnName, &out.TimeColumnName + *out = new(string) + **out = **in + } + if in.ValueColumnName != nil { + in, out := &in.ValueColumnName, &out.ValueColumnName + *out = new(string) + **out = **in + } + if in.VersionColumnName != nil { + in, out := &in.VersionColumnName, &out.VersionColumnName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphiteRollupObservation. +func (in *GraphiteRollupObservation) DeepCopy() *GraphiteRollupObservation { + if in == nil { + return nil + } + out := new(GraphiteRollupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphiteRollupParameters) DeepCopyInto(out *GraphiteRollupParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathColumnName != nil { + in, out := &in.PathColumnName, &out.PathColumnName + *out = new(string) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = make([]PatternParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeColumnName != nil { + in, out := &in.TimeColumnName, &out.TimeColumnName + *out = new(string) + **out = **in + } + if in.ValueColumnName != nil { + in, out := &in.ValueColumnName, &out.ValueColumnName + *out = new(string) + **out = **in + } + if in.VersionColumnName != nil { + in, out := &in.VersionColumnName, &out.VersionColumnName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphiteRollupParameters. +func (in *GraphiteRollupParameters) DeepCopy() *GraphiteRollupParameters { + if in == nil { + return nil + } + out := new(GraphiteRollupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumCluster) DeepCopyInto(out *GreenplumCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumCluster. +func (in *GreenplumCluster) DeepCopy() *GreenplumCluster { + if in == nil { + return nil + } + out := new(GreenplumCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GreenplumCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterAccessInitParameters) DeepCopyInto(out *GreenplumClusterAccessInitParameters) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } + if in.WebSQL != nil { + in, out := &in.WebSQL, &out.WebSQL + *out = new(bool) + **out = **in + } + if in.YandexQuery != nil { + in, out := &in.YandexQuery, &out.YandexQuery + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterAccessInitParameters. +func (in *GreenplumClusterAccessInitParameters) DeepCopy() *GreenplumClusterAccessInitParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GreenplumClusterAccessObservation) DeepCopyInto(out *GreenplumClusterAccessObservation) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } + if in.WebSQL != nil { + in, out := &in.WebSQL, &out.WebSQL + *out = new(bool) + **out = **in + } + if in.YandexQuery != nil { + in, out := &in.YandexQuery, &out.YandexQuery + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterAccessObservation. +func (in *GreenplumClusterAccessObservation) DeepCopy() *GreenplumClusterAccessObservation { + if in == nil { + return nil + } + out := new(GreenplumClusterAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterAccessParameters) DeepCopyInto(out *GreenplumClusterAccessParameters) { + *out = *in + if in.DataLens != nil { + in, out := &in.DataLens, &out.DataLens + *out = new(bool) + **out = **in + } + if in.DataTransfer != nil { + in, out := &in.DataTransfer, &out.DataTransfer + *out = new(bool) + **out = **in + } + if in.WebSQL != nil { + in, out := &in.WebSQL, &out.WebSQL + *out = new(bool) + **out = **in + } + if in.YandexQuery != nil { + in, out := &in.YandexQuery, &out.YandexQuery + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterAccessParameters. +func (in *GreenplumClusterAccessParameters) DeepCopy() *GreenplumClusterAccessParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterBackupWindowStartInitParameters) DeepCopyInto(out *GreenplumClusterBackupWindowStartInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterBackupWindowStartInitParameters. +func (in *GreenplumClusterBackupWindowStartInitParameters) DeepCopy() *GreenplumClusterBackupWindowStartInitParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterBackupWindowStartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterBackupWindowStartObservation) DeepCopyInto(out *GreenplumClusterBackupWindowStartObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterBackupWindowStartObservation. 
+func (in *GreenplumClusterBackupWindowStartObservation) DeepCopy() *GreenplumClusterBackupWindowStartObservation { + if in == nil { + return nil + } + out := new(GreenplumClusterBackupWindowStartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterBackupWindowStartParameters) DeepCopyInto(out *GreenplumClusterBackupWindowStartParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterBackupWindowStartParameters. +func (in *GreenplumClusterBackupWindowStartParameters) DeepCopy() *GreenplumClusterBackupWindowStartParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterBackupWindowStartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterCloudStorageInitParameters) DeepCopyInto(out *GreenplumClusterCloudStorageInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterCloudStorageInitParameters. +func (in *GreenplumClusterCloudStorageInitParameters) DeepCopy() *GreenplumClusterCloudStorageInitParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterCloudStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterCloudStorageObservation) DeepCopyInto(out *GreenplumClusterCloudStorageObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterCloudStorageObservation. +func (in *GreenplumClusterCloudStorageObservation) DeepCopy() *GreenplumClusterCloudStorageObservation { + if in == nil { + return nil + } + out := new(GreenplumClusterCloudStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterCloudStorageParameters) DeepCopyInto(out *GreenplumClusterCloudStorageParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterCloudStorageParameters. +func (in *GreenplumClusterCloudStorageParameters) DeepCopy() *GreenplumClusterCloudStorageParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterCloudStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GreenplumClusterInitParameters) DeepCopyInto(out *GreenplumClusterInitParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]GreenplumClusterAccessInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.BackgroundActivities != nil { + in, out := &in.BackgroundActivities, &out.BackgroundActivities + *out = make([]BackgroundActivitiesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]GreenplumClusterBackupWindowStartInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = make([]GreenplumClusterCloudStorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GreenplumConfig != nil { + in, out := &in.GreenplumConfig, &out.GreenplumConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]GreenplumClusterMaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterHostCount != nil { + in, out := &in.MasterHostCount, &out.MasterHostCount + *out = new(float64) + **out = **in + } + if in.MasterHostGroupIds != nil { + in, out := &in.MasterHostGroupIds, &out.MasterHostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MasterSubcluster != nil { + in, out := &in.MasterSubcluster, &out.MasterSubcluster + *out = make([]MasterSubclusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + 
*out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PoolerConfig != nil { + in, out := &in.PoolerConfig, &out.PoolerConfig + *out = make([]PoolerConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PxfConfig != nil { + in, out := &in.PxfConfig, &out.PxfConfig + *out = make([]PxfConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SegmentHostCount != nil { + in, out := &in.SegmentHostCount, &out.SegmentHostCount + *out = new(float64) + **out = **in + } + if in.SegmentHostGroupIds != nil { + in, out := &in.SegmentHostGroupIds, &out.SegmentHostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SegmentInHost != nil { + in, out := &in.SegmentInHost, &out.SegmentInHost + *out = new(float64) + **out = **in + } + if in.SegmentSubcluster != nil { + in, out := &in.SegmentSubcluster, &out.SegmentSubcluster + *out = make([]SegmentSubclusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } + out.UserPasswordSecretRef = in.UserPasswordSecretRef + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterInitParameters. +func (in *GreenplumClusterInitParameters) DeepCopy() *GreenplumClusterInitParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GreenplumClusterList) DeepCopyInto(out *GreenplumClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GreenplumCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterList. +func (in *GreenplumClusterList) DeepCopy() *GreenplumClusterList { + if in == nil { + return nil + } + out := new(GreenplumClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GreenplumClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterMaintenanceWindowInitParameters) DeepCopyInto(out *GreenplumClusterMaintenanceWindowInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterMaintenanceWindowInitParameters. +func (in *GreenplumClusterMaintenanceWindowInitParameters) DeepCopy() *GreenplumClusterMaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterMaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterMaintenanceWindowObservation) DeepCopyInto(out *GreenplumClusterMaintenanceWindowObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterMaintenanceWindowObservation. +func (in *GreenplumClusterMaintenanceWindowObservation) DeepCopy() *GreenplumClusterMaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(GreenplumClusterMaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterMaintenanceWindowParameters) DeepCopyInto(out *GreenplumClusterMaintenanceWindowParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterMaintenanceWindowParameters. 
+func (in *GreenplumClusterMaintenanceWindowParameters) DeepCopy() *GreenplumClusterMaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterMaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterObservation) DeepCopyInto(out *GreenplumClusterObservation) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]GreenplumClusterAccessObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.BackgroundActivities != nil { + in, out := &in.BackgroundActivities, &out.BackgroundActivities + *out = make([]BackgroundActivitiesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]GreenplumClusterBackupWindowStartObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = make([]GreenplumClusterCloudStorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.GreenplumConfig != nil { + in, out := &in.GreenplumConfig, &out.GreenplumConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Health != nil { + in, out := &in.Health, &out.Health + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]GreenplumClusterMaintenanceWindowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterHostCount != nil { + in, out := &in.MasterHostCount, &out.MasterHostCount + *out = new(float64) + **out = **in + } + if in.MasterHostGroupIds != nil { + in, out := &in.MasterHostGroupIds, &out.MasterHostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MasterHosts != 
nil { + in, out := &in.MasterHosts, &out.MasterHosts + *out = make([]MasterHostsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterSubcluster != nil { + in, out := &in.MasterSubcluster, &out.MasterSubcluster + *out = make([]MasterSubclusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.PoolerConfig != nil { + in, out := &in.PoolerConfig, &out.PoolerConfig + *out = make([]PoolerConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PxfConfig != nil { + in, out := &in.PxfConfig, &out.PxfConfig + *out = make([]PxfConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SegmentHostCount != nil { + in, out := &in.SegmentHostCount, &out.SegmentHostCount + *out = new(float64) + **out = **in + } + if in.SegmentHostGroupIds != nil { + in, out := &in.SegmentHostGroupIds, &out.SegmentHostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SegmentHosts != nil { + in, out := &in.SegmentHosts, &out.SegmentHosts + *out = make([]SegmentHostsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SegmentInHost != nil { + in, out := &in.SegmentInHost, &out.SegmentInHost + *out = new(float64) + **out = **in + } + if in.SegmentSubcluster != nil { + in, out := &in.SegmentSubcluster, &out.SegmentSubcluster + *out = make([]SegmentSubclusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterObservation. +func (in *GreenplumClusterObservation) DeepCopy() *GreenplumClusterObservation { + if in == nil { + return nil + } + out := new(GreenplumClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GreenplumClusterParameters) DeepCopyInto(out *GreenplumClusterParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]GreenplumClusterAccessParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.BackgroundActivities != nil { + in, out := &in.BackgroundActivities, &out.BackgroundActivities + *out = make([]BackgroundActivitiesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]GreenplumClusterBackupWindowStartParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CloudStorage != nil { + in, out := &in.CloudStorage, &out.CloudStorage + *out = make([]GreenplumClusterCloudStorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GreenplumConfig != nil { + in, out := &in.GreenplumConfig, &out.GreenplumConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]GreenplumClusterMaintenanceWindowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterHostCount != nil { + in, out := &in.MasterHostCount, &out.MasterHostCount + *out = new(float64) + **out = **in + } + if in.MasterHostGroupIds != nil { + in, out := &in.MasterHostGroupIds, &out.MasterHostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MasterSubcluster != nil { + in, out := &in.MasterSubcluster, &out.MasterSubcluster + *out = make([]MasterSubclusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in 
+ } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PoolerConfig != nil { + in, out := &in.PoolerConfig, &out.PoolerConfig + *out = make([]PoolerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PxfConfig != nil { + in, out := &in.PxfConfig, &out.PxfConfig + *out = make([]PxfConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SegmentHostCount != nil { + in, out := &in.SegmentHostCount, &out.SegmentHostCount + *out = new(float64) + **out = **in + } + if in.SegmentHostGroupIds != nil { + in, out := &in.SegmentHostGroupIds, &out.SegmentHostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SegmentInHost != nil { + in, out := &in.SegmentInHost, &out.SegmentInHost + *out = new(float64) + **out = **in + } + if in.SegmentSubcluster != nil { + in, out := &in.SegmentSubcluster, &out.SegmentSubcluster + *out = make([]SegmentSubclusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } + out.UserPasswordSecretRef = in.UserPasswordSecretRef + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterParameters. +func (in *GreenplumClusterParameters) DeepCopy() *GreenplumClusterParameters { + if in == nil { + return nil + } + out := new(GreenplumClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GreenplumClusterSpec) DeepCopyInto(out *GreenplumClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterSpec. +func (in *GreenplumClusterSpec) DeepCopy() *GreenplumClusterSpec { + if in == nil { + return nil + } + out := new(GreenplumClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenplumClusterStatus) DeepCopyInto(out *GreenplumClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenplumClusterStatus. +func (in *GreenplumClusterStatus) DeepCopy() *GreenplumClusterStatus { + if in == nil { + return nil + } + out := new(GreenplumClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostInitParameters) DeepCopyInto(out *HostInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.ShardName != nil { + in, out := &in.ShardName, &out.ShardName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostInitParameters. +func (in *HostInitParameters) DeepCopy() *HostInitParameters { + if in == nil { + return nil + } + out := new(HostInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostObservation) DeepCopyInto(out *HostObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.ShardName != nil { + in, out := &in.ShardName, &out.ShardName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostObservation. 
+func (in *HostObservation) DeepCopy() *HostObservation { + if in == nil { + return nil + } + out := new(HostObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostParameters) DeepCopyInto(out *HostParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.ShardName != nil { + in, out := &in.ShardName, &out.ShardName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostParameters. +func (in *HostParameters) DeepCopy() *HostParameters { + if in == nil { + return nil + } + out := new(HostParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostParametersInitParameters) DeepCopyInto(out *HostParametersInitParameters) { + *out = *in + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SecondaryDelaySecs != nil { + in, out := &in.SecondaryDelaySecs, &out.SecondaryDelaySecs + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostParametersInitParameters. +func (in *HostParametersInitParameters) DeepCopy() *HostParametersInitParameters { + if in == nil { + return nil + } + out := new(HostParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostParametersObservation) DeepCopyInto(out *HostParametersObservation) { + *out = *in + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SecondaryDelaySecs != nil { + in, out := &in.SecondaryDelaySecs, &out.SecondaryDelaySecs + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostParametersObservation. +func (in *HostParametersObservation) DeepCopy() *HostParametersObservation { + if in == nil { + return nil + } + out := new(HostParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostParametersParameters) DeepCopyInto(out *HostParametersParameters) { + *out = *in + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SecondaryDelaySecs != nil { + in, out := &in.SecondaryDelaySecs, &out.SecondaryDelaySecs + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostParametersParameters. +func (in *HostParametersParameters) DeepCopy() *HostParametersParameters { + if in == nil { + return nil + } + out := new(HostParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JournalInitParameters) DeepCopyInto(out *JournalInitParameters) { + *out = *in + if in.CommitInterval != nil { + in, out := &in.CommitInterval, &out.CommitInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JournalInitParameters. +func (in *JournalInitParameters) DeepCopy() *JournalInitParameters { + if in == nil { + return nil + } + out := new(JournalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JournalObservation) DeepCopyInto(out *JournalObservation) { + *out = *in + if in.CommitInterval != nil { + in, out := &in.CommitInterval, &out.CommitInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JournalObservation. 
+func (in *JournalObservation) DeepCopy() *JournalObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(JournalObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JournalParameters) DeepCopyInto(out *JournalParameters) {
+	*out = *in
+	if in.CommitInterval != nil {
+		in, out := &in.CommitInterval, &out.CommitInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JournalParameters.
+func (in *JournalParameters) DeepCopy() *JournalParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(JournalParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaCluster) DeepCopyInto(out *KafkaCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaCluster.
+func (in *KafkaCluster) DeepCopy() *KafkaCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterConfigInitParameters) DeepCopyInto(out *KafkaClusterConfigInitParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]ConfigAccessInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BrokersCount != nil {
+		in, out := &in.BrokersCount, &out.BrokersCount
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]DiskSizeAutoscalingInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Kafka != nil {
+		in, out := &in.Kafka, &out.Kafka
+		*out = make([]ConfigKafkaInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchemaRegistry != nil {
+		in, out := &in.SchemaRegistry, &out.SchemaRegistry
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnmanagedTopics != nil {
+		in, out := &in.UnmanagedTopics, &out.UnmanagedTopics
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zones != nil {
+		in, out := &in.Zones, &out.Zones
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zookeeper != nil {
+		in, out := &in.Zookeeper, &out.Zookeeper
+		*out = make([]ConfigZookeeperInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterConfigInitParameters.
+func (in *KafkaClusterConfigInitParameters) DeepCopy() *KafkaClusterConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterConfigObservation) DeepCopyInto(out *KafkaClusterConfigObservation) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]ConfigAccessObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BrokersCount != nil {
+		in, out := &in.BrokersCount, &out.BrokersCount
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]DiskSizeAutoscalingObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Kafka != nil {
+		in, out := &in.Kafka, &out.Kafka
+		*out = make([]ConfigKafkaObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchemaRegistry != nil {
+		in, out := &in.SchemaRegistry, &out.SchemaRegistry
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnmanagedTopics != nil {
+		in, out := &in.UnmanagedTopics, &out.UnmanagedTopics
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zones != nil {
+		in, out := &in.Zones, &out.Zones
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zookeeper != nil {
+		in, out := &in.Zookeeper, &out.Zookeeper
+		*out = make([]ConfigZookeeperObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterConfigObservation.
+func (in *KafkaClusterConfigObservation) DeepCopy() *KafkaClusterConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterConfigParameters) DeepCopyInto(out *KafkaClusterConfigParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]ConfigAccessParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BrokersCount != nil {
+		in, out := &in.BrokersCount, &out.BrokersCount
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]DiskSizeAutoscalingParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Kafka != nil {
+		in, out := &in.Kafka, &out.Kafka
+		*out = make([]ConfigKafkaParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SchemaRegistry != nil {
+		in, out := &in.SchemaRegistry, &out.SchemaRegistry
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UnmanagedTopics != nil {
+		in, out := &in.UnmanagedTopics, &out.UnmanagedTopics
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zones != nil {
+		in, out := &in.Zones, &out.Zones
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zookeeper != nil {
+		in, out := &in.Zookeeper, &out.Zookeeper
+		*out = make([]ConfigZookeeperParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterConfigParameters.
+func (in *KafkaClusterConfigParameters) DeepCopy() *KafkaClusterConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterHostInitParameters) DeepCopyInto(out *KafkaClusterHostInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterHostInitParameters.
+func (in *KafkaClusterHostInitParameters) DeepCopy() *KafkaClusterHostInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterHostInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterHostObservation) DeepCopyInto(out *KafkaClusterHostObservation) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterHostObservation.
+func (in *KafkaClusterHostObservation) DeepCopy() *KafkaClusterHostObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterHostObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterHostParameters) DeepCopyInto(out *KafkaClusterHostParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterHostParameters.
+func (in *KafkaClusterHostParameters) DeepCopy() *KafkaClusterHostParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterHostParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterInitParameters) DeepCopyInto(out *KafkaClusterInitParameters) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]KafkaClusterConfigInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HostGroupIds != nil {
+		in, out := &in.HostGroupIds, &out.HostGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]KafkaClusterMaintenanceWindowInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIds != nil {
+		in, out := &in.SubnetIds, &out.SubnetIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetIdsRefs != nil {
+		in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SubnetIdsSelector != nil {
+		in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Topic != nil {
+		in, out := &in.Topic, &out.Topic
+		*out = make([]TopicInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]KafkaClusterUserInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterInitParameters.
+func (in *KafkaClusterInitParameters) DeepCopy() *KafkaClusterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterList) DeepCopyInto(out *KafkaClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KafkaCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterList.
+func (in *KafkaClusterList) DeepCopy() *KafkaClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterMaintenanceWindowInitParameters) DeepCopyInto(out *KafkaClusterMaintenanceWindowInitParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMaintenanceWindowInitParameters.
+func (in *KafkaClusterMaintenanceWindowInitParameters) DeepCopy() *KafkaClusterMaintenanceWindowInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterMaintenanceWindowInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterMaintenanceWindowObservation) DeepCopyInto(out *KafkaClusterMaintenanceWindowObservation) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMaintenanceWindowObservation.
+func (in *KafkaClusterMaintenanceWindowObservation) DeepCopy() *KafkaClusterMaintenanceWindowObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterMaintenanceWindowObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterMaintenanceWindowParameters) DeepCopyInto(out *KafkaClusterMaintenanceWindowParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMaintenanceWindowParameters.
+func (in *KafkaClusterMaintenanceWindowParameters) DeepCopy() *KafkaClusterMaintenanceWindowParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterMaintenanceWindowParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterObservation) DeepCopyInto(out *KafkaClusterObservation) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]KafkaClusterConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]KafkaClusterHostObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostGroupIds != nil {
+		in, out := &in.HostGroupIds, &out.HostGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]KafkaClusterMaintenanceWindowObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIds != nil {
+		in, out := &in.SubnetIds, &out.SubnetIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Topic != nil {
+		in, out := &in.Topic, &out.Topic
+		*out = make([]TopicObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]KafkaClusterUserObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterObservation.
+func (in *KafkaClusterObservation) DeepCopy() *KafkaClusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterParameters) DeepCopyInto(out *KafkaClusterParameters) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]KafkaClusterConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HostGroupIds != nil {
+		in, out := &in.HostGroupIds, &out.HostGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]KafkaClusterMaintenanceWindowParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIds != nil {
+		in, out := &in.SubnetIds, &out.SubnetIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SubnetIdsRefs != nil {
+		in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SubnetIdsSelector != nil {
+		in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Topic != nil {
+		in, out := &in.Topic, &out.Topic
+		*out = make([]TopicParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]KafkaClusterUserParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterParameters.
+func (in *KafkaClusterParameters) DeepCopy() *KafkaClusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterSpec) DeepCopyInto(out *KafkaClusterSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterSpec.
+func (in *KafkaClusterSpec) DeepCopy() *KafkaClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterStatus) DeepCopyInto(out *KafkaClusterStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStatus.
+func (in *KafkaClusterStatus) DeepCopy() *KafkaClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterUserInitParameters) DeepCopyInto(out *KafkaClusterUserInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]UserPermissionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterUserInitParameters.
+func (in *KafkaClusterUserInitParameters) DeepCopy() *KafkaClusterUserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterUserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterUserObservation) DeepCopyInto(out *KafkaClusterUserObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]UserPermissionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterUserObservation.
+func (in *KafkaClusterUserObservation) DeepCopy() *KafkaClusterUserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterUserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaClusterUserParameters) DeepCopyInto(out *KafkaClusterUserParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]UserPermissionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterUserParameters.
+func (in *KafkaClusterUserParameters) DeepCopy() *KafkaClusterUserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaClusterUserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConfigInitParameters) DeepCopyInto(out *KafkaConfigInitParameters) {
+	*out = *in
+	if in.AutoCreateTopicsEnable != nil {
+		in, out := &in.AutoCreateTopicsEnable, &out.AutoCreateTopicsEnable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CompressionType != nil {
+		in, out := &in.CompressionType, &out.CompressionType
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultReplicationFactor != nil {
+		in, out := &in.DefaultReplicationFactor, &out.DefaultReplicationFactor
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushIntervalMessages != nil {
+		in, out := &in.LogFlushIntervalMessages, &out.LogFlushIntervalMessages
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushIntervalMs != nil {
+		in, out := &in.LogFlushIntervalMs, &out.LogFlushIntervalMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushSchedulerIntervalMs != nil {
+		in, out := &in.LogFlushSchedulerIntervalMs, &out.LogFlushSchedulerIntervalMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogPreallocate != nil {
+		in, out := &in.LogPreallocate, &out.LogPreallocate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.LogRetentionBytes != nil {
+		in, out := &in.LogRetentionBytes, &out.LogRetentionBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionHours != nil {
+		in, out := &in.LogRetentionHours, &out.LogRetentionHours
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionMinutes != nil {
+		in, out := &in.LogRetentionMinutes, &out.LogRetentionMinutes
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionMs != nil {
+		in, out := &in.LogRetentionMs, &out.LogRetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogSegmentBytes != nil {
+		in, out := &in.LogSegmentBytes, &out.LogSegmentBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.MessageMaxBytes != nil {
+		in, out := &in.MessageMaxBytes, &out.MessageMaxBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.NumPartitions != nil {
+		in, out := &in.NumPartitions, &out.NumPartitions
+		*out = new(string)
+		**out = **in
+	}
+	if in.OffsetsRetentionMinutes != nil {
+		in, out := &in.OffsetsRetentionMinutes, &out.OffsetsRetentionMinutes
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplicaFetchMaxBytes != nil {
+		in, out := &in.ReplicaFetchMaxBytes, &out.ReplicaFetchMaxBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.SSLCipherSuites != nil {
+		in, out := &in.SSLCipherSuites, &out.SSLCipherSuites
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SaslEnabledMechanisms != nil {
+		in, out := &in.SaslEnabledMechanisms, &out.SaslEnabledMechanisms
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SocketReceiveBufferBytes != nil {
+		in, out := &in.SocketReceiveBufferBytes, &out.SocketReceiveBufferBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.SocketSendBufferBytes != nil {
+		in, out := &in.SocketSendBufferBytes, &out.SocketSendBufferBytes
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConfigInitParameters.
+func (in *KafkaConfigInitParameters) DeepCopy() *KafkaConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConfigObservation) DeepCopyInto(out *KafkaConfigObservation) {
+	*out = *in
+	if in.AutoCreateTopicsEnable != nil {
+		in, out := &in.AutoCreateTopicsEnable, &out.AutoCreateTopicsEnable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CompressionType != nil {
+		in, out := &in.CompressionType, &out.CompressionType
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultReplicationFactor != nil {
+		in, out := &in.DefaultReplicationFactor, &out.DefaultReplicationFactor
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushIntervalMessages != nil {
+		in, out := &in.LogFlushIntervalMessages, &out.LogFlushIntervalMessages
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushIntervalMs != nil {
+		in, out := &in.LogFlushIntervalMs, &out.LogFlushIntervalMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushSchedulerIntervalMs != nil {
+		in, out := &in.LogFlushSchedulerIntervalMs, &out.LogFlushSchedulerIntervalMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogPreallocate != nil {
+		in, out := &in.LogPreallocate, &out.LogPreallocate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.LogRetentionBytes != nil {
+		in, out := &in.LogRetentionBytes, &out.LogRetentionBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionHours != nil {
+		in, out := &in.LogRetentionHours, &out.LogRetentionHours
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionMinutes != nil {
+		in, out := &in.LogRetentionMinutes, &out.LogRetentionMinutes
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionMs != nil {
+		in, out := &in.LogRetentionMs, &out.LogRetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogSegmentBytes != nil {
+		in, out := &in.LogSegmentBytes, &out.LogSegmentBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.MessageMaxBytes != nil {
+		in, out := &in.MessageMaxBytes, &out.MessageMaxBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.NumPartitions != nil {
+		in, out := &in.NumPartitions, &out.NumPartitions
+		*out = new(string)
+		**out = **in
+	}
+	if in.OffsetsRetentionMinutes != nil {
+		in, out := &in.OffsetsRetentionMinutes, &out.OffsetsRetentionMinutes
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplicaFetchMaxBytes != nil {
+		in, out := &in.ReplicaFetchMaxBytes, &out.ReplicaFetchMaxBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.SSLCipherSuites != nil {
+		in, out := &in.SSLCipherSuites, &out.SSLCipherSuites
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SaslEnabledMechanisms != nil {
+		in, out := &in.SaslEnabledMechanisms, &out.SaslEnabledMechanisms
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SocketReceiveBufferBytes != nil {
+		in, out := &in.SocketReceiveBufferBytes, &out.SocketReceiveBufferBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.SocketSendBufferBytes != nil {
+		in, out := &in.SocketSendBufferBytes, &out.SocketSendBufferBytes
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConfigObservation.
+func (in *KafkaConfigObservation) DeepCopy() *KafkaConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConfigParameters) DeepCopyInto(out *KafkaConfigParameters) {
+	*out = *in
+	if in.AutoCreateTopicsEnable != nil {
+		in, out := &in.AutoCreateTopicsEnable, &out.AutoCreateTopicsEnable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CompressionType != nil {
+		in, out := &in.CompressionType, &out.CompressionType
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultReplicationFactor != nil {
+		in, out := &in.DefaultReplicationFactor, &out.DefaultReplicationFactor
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushIntervalMessages != nil {
+		in, out := &in.LogFlushIntervalMessages, &out.LogFlushIntervalMessages
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushIntervalMs != nil {
+		in, out := &in.LogFlushIntervalMs, &out.LogFlushIntervalMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogFlushSchedulerIntervalMs != nil {
+		in, out := &in.LogFlushSchedulerIntervalMs, &out.LogFlushSchedulerIntervalMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogPreallocate != nil {
+		in, out := &in.LogPreallocate, &out.LogPreallocate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.LogRetentionBytes != nil {
+		in, out := &in.LogRetentionBytes, &out.LogRetentionBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionHours != nil {
+		in, out := &in.LogRetentionHours, &out.LogRetentionHours
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionMinutes != nil {
+		in, out := &in.LogRetentionMinutes, &out.LogRetentionMinutes
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogRetentionMs != nil {
+		in, out := &in.LogRetentionMs, &out.LogRetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.LogSegmentBytes != nil {
+		in, out := &in.LogSegmentBytes, &out.LogSegmentBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.MessageMaxBytes != nil {
+		in, out := &in.MessageMaxBytes, &out.MessageMaxBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.NumPartitions != nil {
+		in, out := &in.NumPartitions, &out.NumPartitions
+		*out = new(string)
+		**out = **in
+	}
+	if in.OffsetsRetentionMinutes != nil {
+		in, out := &in.OffsetsRetentionMinutes, &out.OffsetsRetentionMinutes
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplicaFetchMaxBytes != nil {
+		in, out := &in.ReplicaFetchMaxBytes, &out.ReplicaFetchMaxBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.SSLCipherSuites != nil {
+		in, out := &in.SSLCipherSuites, &out.SSLCipherSuites
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SaslEnabledMechanisms != nil {
+		in, out := &in.SaslEnabledMechanisms, &out.SaslEnabledMechanisms
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SocketReceiveBufferBytes != nil {
+		in, out := &in.SocketReceiveBufferBytes, &out.SocketReceiveBufferBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.SocketSendBufferBytes != nil {
+		in, out := &in.SocketSendBufferBytes, &out.SocketSendBufferBytes
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConfigParameters.
+func (in *KafkaConfigParameters) DeepCopy() *KafkaConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnector) DeepCopyInto(out *KafkaConnector) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnector.
+func (in *KafkaConnector) DeepCopy() *KafkaConnector {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaConnector) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnectorInitParameters) DeepCopyInto(out *KafkaConnectorInitParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConnectorConfigMirrormaker != nil {
+		in, out := &in.ConnectorConfigMirrormaker, &out.ConnectorConfigMirrormaker
+		*out = make([]ConnectorConfigMirrormakerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConnectorConfigS3Sink != nil {
+		in, out := &in.ConnectorConfigS3Sink, &out.ConnectorConfigS3Sink
+		*out = make([]ConnectorConfigS3SinkInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.TasksMax != nil {
+		in, out := &in.TasksMax, &out.TasksMax
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectorInitParameters.
+func (in *KafkaConnectorInitParameters) DeepCopy() *KafkaConnectorInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnectorInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnectorList) DeepCopyInto(out *KafkaConnectorList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KafkaConnector, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectorList.
+func (in *KafkaConnectorList) DeepCopy() *KafkaConnectorList {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnectorList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaConnectorList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnectorObservation) DeepCopyInto(out *KafkaConnectorObservation) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ConnectorConfigMirrormaker != nil {
+		in, out := &in.ConnectorConfigMirrormaker, &out.ConnectorConfigMirrormaker
+		*out = make([]ConnectorConfigMirrormakerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConnectorConfigS3Sink != nil {
+		in, out := &in.ConnectorConfigS3Sink, &out.ConnectorConfigS3Sink
+		*out = make([]ConnectorConfigS3SinkObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.TasksMax != nil {
+		in, out := &in.TasksMax, &out.TasksMax
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectorObservation.
+func (in *KafkaConnectorObservation) DeepCopy() *KafkaConnectorObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnectorObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnectorParameters) DeepCopyInto(out *KafkaConnectorParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConnectorConfigMirrormaker != nil {
+		in, out := &in.ConnectorConfigMirrormaker, &out.ConnectorConfigMirrormaker
+		*out = make([]ConnectorConfigMirrormakerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConnectorConfigS3Sink != nil {
+		in, out := &in.ConnectorConfigS3Sink, &out.ConnectorConfigS3Sink
+		*out = make([]ConnectorConfigS3SinkParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.TasksMax != nil {
+		in, out := &in.TasksMax, &out.TasksMax
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectorParameters.
+func (in *KafkaConnectorParameters) DeepCopy() *KafkaConnectorParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnectorParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnectorSpec) DeepCopyInto(out *KafkaConnectorSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectorSpec.
+func (in *KafkaConnectorSpec) DeepCopy() *KafkaConnectorSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnectorSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConnectorStatus) DeepCopyInto(out *KafkaConnectorStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectorStatus.
+func (in *KafkaConnectorStatus) DeepCopy() *KafkaConnectorStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaConnectorStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaInitParameters) DeepCopyInto(out *KafkaInitParameters) {
+	*out = *in
+	if in.AutoOffsetReset != nil {
+		in, out := &in.AutoOffsetReset, &out.AutoOffsetReset
+		*out = new(string)
+		**out = **in
+	}
+	if in.Debug != nil {
+		in, out := &in.Debug, &out.Debug
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableSSLCertificateVerification != nil {
+		in, out := &in.EnableSSLCertificateVerification, &out.EnableSSLCertificateVerification
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxPollIntervalMs != nil {
+		in, out := &in.MaxPollIntervalMs, &out.MaxPollIntervalMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SaslMechanism != nil {
+		in, out := &in.SaslMechanism, &out.SaslMechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.SaslPasswordSecretRef != nil {
+		in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+	if in.SaslUsername != nil {
+		in, out := &in.SaslUsername, &out.SaslUsername
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityProtocol != nil {
+		in, out := &in.SecurityProtocol, &out.SecurityProtocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SessionTimeoutMs != nil {
+		in, out := &in.SessionTimeoutMs, &out.SessionTimeoutMs
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaInitParameters.
+func (in *KafkaInitParameters) DeepCopy() *KafkaInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaObservation) DeepCopyInto(out *KafkaObservation) {
+	*out = *in
+	if in.AutoOffsetReset != nil {
+		in, out := &in.AutoOffsetReset, &out.AutoOffsetReset
+		*out = new(string)
+		**out = **in
+	}
+	if in.Debug != nil {
+		in, out := &in.Debug, &out.Debug
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableSSLCertificateVerification != nil {
+		in, out := &in.EnableSSLCertificateVerification, &out.EnableSSLCertificateVerification
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxPollIntervalMs != nil {
+		in, out := &in.MaxPollIntervalMs, &out.MaxPollIntervalMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SaslMechanism != nil {
+		in, out := &in.SaslMechanism, &out.SaslMechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.SaslUsername != nil {
+		in, out := &in.SaslUsername, &out.SaslUsername
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityProtocol != nil {
+		in, out := &in.SecurityProtocol, &out.SecurityProtocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SessionTimeoutMs != nil {
+		in, out := &in.SessionTimeoutMs, &out.SessionTimeoutMs
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaObservation.
+func (in *KafkaObservation) DeepCopy() *KafkaObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaParameters) DeepCopyInto(out *KafkaParameters) {
+	*out = *in
+	if in.AutoOffsetReset != nil {
+		in, out := &in.AutoOffsetReset, &out.AutoOffsetReset
+		*out = new(string)
+		**out = **in
+	}
+	if in.Debug != nil {
+		in, out := &in.Debug, &out.Debug
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableSSLCertificateVerification != nil {
+		in, out := &in.EnableSSLCertificateVerification, &out.EnableSSLCertificateVerification
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxPollIntervalMs != nil {
+		in, out := &in.MaxPollIntervalMs, &out.MaxPollIntervalMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SaslMechanism != nil {
+		in, out := &in.SaslMechanism, &out.SaslMechanism
+		*out = new(string)
+		**out = **in
+	}
+	if in.SaslPasswordSecretRef != nil {
+		in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+	if in.SaslUsername != nil {
+		in, out := &in.SaslUsername, &out.SaslUsername
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityProtocol != nil {
+		in, out := &in.SecurityProtocol, &out.SecurityProtocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SessionTimeoutMs != nil {
+		in, out := &in.SessionTimeoutMs, &out.SessionTimeoutMs
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaParameters.
+func (in *KafkaParameters) DeepCopy() *KafkaParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaResourcesInitParameters) DeepCopyInto(out *KafkaResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaResourcesInitParameters.
+func (in *KafkaResourcesInitParameters) DeepCopy() *KafkaResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaResourcesObservation) DeepCopyInto(out *KafkaResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaResourcesObservation.
+func (in *KafkaResourcesObservation) DeepCopy() *KafkaResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaResourcesParameters) DeepCopyInto(out *KafkaResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaResourcesParameters.
+func (in *KafkaResourcesParameters) DeepCopy() *KafkaResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopic) DeepCopyInto(out *KafkaTopic) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopic.
+func (in *KafkaTopic) DeepCopy() *KafkaTopic {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopic)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaTopic) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicInitParameters) DeepCopyInto(out *KafkaTopicInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Settings != nil {
+		in, out := &in.Settings, &out.Settings
+		*out = make([]SettingsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicInitParameters.
+func (in *KafkaTopicInitParameters) DeepCopy() *KafkaTopicInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicInitParameters_2) DeepCopyInto(out *KafkaTopicInitParameters_2) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Partitions != nil {
+		in, out := &in.Partitions, &out.Partitions
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicationFactor != nil {
+		in, out := &in.ReplicationFactor, &out.ReplicationFactor
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TopicConfig != nil {
+		in, out := &in.TopicConfig, &out.TopicConfig
+		*out = make([]KafkaTopicTopicConfigInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicInitParameters_2.
+func (in *KafkaTopicInitParameters_2) DeepCopy() *KafkaTopicInitParameters_2 {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicInitParameters_2)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicList) DeepCopyInto(out *KafkaTopicList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KafkaTopic, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicList.
+func (in *KafkaTopicList) DeepCopy() *KafkaTopicList {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaTopicList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicObservation) DeepCopyInto(out *KafkaTopicObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Settings != nil {
+		in, out := &in.Settings, &out.Settings
+		*out = make([]SettingsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicObservation.
+func (in *KafkaTopicObservation) DeepCopy() *KafkaTopicObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicObservation_2) DeepCopyInto(out *KafkaTopicObservation_2) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Partitions != nil {
+		in, out := &in.Partitions, &out.Partitions
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicationFactor != nil {
+		in, out := &in.ReplicationFactor, &out.ReplicationFactor
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TopicConfig != nil {
+		in, out := &in.TopicConfig, &out.TopicConfig
+		*out = make([]KafkaTopicTopicConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicObservation_2.
+func (in *KafkaTopicObservation_2) DeepCopy() *KafkaTopicObservation_2 {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicObservation_2)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicParameters) DeepCopyInto(out *KafkaTopicParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Settings != nil {
+		in, out := &in.Settings, &out.Settings
+		*out = make([]SettingsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicParameters.
+func (in *KafkaTopicParameters) DeepCopy() *KafkaTopicParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicParameters_2) DeepCopyInto(out *KafkaTopicParameters_2) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Partitions != nil {
+		in, out := &in.Partitions, &out.Partitions
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicationFactor != nil {
+		in, out := &in.ReplicationFactor, &out.ReplicationFactor
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TopicConfig != nil {
+		in, out := &in.TopicConfig, &out.TopicConfig
+		*out = make([]KafkaTopicTopicConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicParameters_2.
+func (in *KafkaTopicParameters_2) DeepCopy() *KafkaTopicParameters_2 {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicParameters_2)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicSpec) DeepCopyInto(out *KafkaTopicSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicSpec.
+func (in *KafkaTopicSpec) DeepCopy() *KafkaTopicSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicStatus) DeepCopyInto(out *KafkaTopicStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicStatus.
+func (in *KafkaTopicStatus) DeepCopy() *KafkaTopicStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicTopicConfigInitParameters) DeepCopyInto(out *KafkaTopicTopicConfigInitParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.CompressionType != nil {
+		in, out := &in.CompressionType, &out.CompressionType
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeleteRetentionMs != nil {
+		in, out := &in.DeleteRetentionMs, &out.DeleteRetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.FileDeleteDelayMs != nil {
+		in, out := &in.FileDeleteDelayMs, &out.FileDeleteDelayMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.FlushMessages != nil {
+		in, out := &in.FlushMessages, &out.FlushMessages
+		*out = new(string)
+		**out = **in
+	}
+	if in.FlushMs != nil {
+		in, out := &in.FlushMs, &out.FlushMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaxMessageBytes != nil {
+		in, out := &in.MaxMessageBytes, &out.MaxMessageBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinCompactionLagMs != nil {
+		in, out := &in.MinCompactionLagMs, &out.MinCompactionLagMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinInsyncReplicas != nil {
+		in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas
+		*out = new(string)
+		**out = **in
+	}
+	if in.Preallocate != nil {
+		in, out := &in.Preallocate, &out.Preallocate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.RetentionBytes != nil {
+		in, out := &in.RetentionBytes, &out.RetentionBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.RetentionMs != nil {
+		in, out := &in.RetentionMs, &out.RetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.SegmentBytes != nil {
+		in, out := &in.SegmentBytes, &out.SegmentBytes
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicTopicConfigInitParameters.
+func (in *KafkaTopicTopicConfigInitParameters) DeepCopy() *KafkaTopicTopicConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicTopicConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicTopicConfigObservation) DeepCopyInto(out *KafkaTopicTopicConfigObservation) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.CompressionType != nil {
+		in, out := &in.CompressionType, &out.CompressionType
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeleteRetentionMs != nil {
+		in, out := &in.DeleteRetentionMs, &out.DeleteRetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.FileDeleteDelayMs != nil {
+		in, out := &in.FileDeleteDelayMs, &out.FileDeleteDelayMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.FlushMessages != nil {
+		in, out := &in.FlushMessages, &out.FlushMessages
+		*out = new(string)
+		**out = **in
+	}
+	if in.FlushMs != nil {
+		in, out := &in.FlushMs, &out.FlushMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaxMessageBytes != nil {
+		in, out := &in.MaxMessageBytes, &out.MaxMessageBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinCompactionLagMs != nil {
+		in, out := &in.MinCompactionLagMs, &out.MinCompactionLagMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinInsyncReplicas != nil {
+		in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas
+		*out = new(string)
+		**out = **in
+	}
+	if in.Preallocate != nil {
+		in, out := &in.Preallocate, &out.Preallocate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.RetentionBytes != nil {
+		in, out := &in.RetentionBytes, &out.RetentionBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.RetentionMs != nil {
+		in, out := &in.RetentionMs, &out.RetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.SegmentBytes != nil {
+		in, out := &in.SegmentBytes, &out.SegmentBytes
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicTopicConfigObservation.
+func (in *KafkaTopicTopicConfigObservation) DeepCopy() *KafkaTopicTopicConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicTopicConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaTopicTopicConfigParameters) DeepCopyInto(out *KafkaTopicTopicConfigParameters) {
+	*out = *in
+	if in.CleanupPolicy != nil {
+		in, out := &in.CleanupPolicy, &out.CleanupPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.CompressionType != nil {
+		in, out := &in.CompressionType, &out.CompressionType
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeleteRetentionMs != nil {
+		in, out := &in.DeleteRetentionMs, &out.DeleteRetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.FileDeleteDelayMs != nil {
+		in, out := &in.FileDeleteDelayMs, &out.FileDeleteDelayMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.FlushMessages != nil {
+		in, out := &in.FlushMessages, &out.FlushMessages
+		*out = new(string)
+		**out = **in
+	}
+	if in.FlushMs != nil {
+		in, out := &in.FlushMs, &out.FlushMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaxMessageBytes != nil {
+		in, out := &in.MaxMessageBytes, &out.MaxMessageBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinCompactionLagMs != nil {
+		in, out := &in.MinCompactionLagMs, &out.MinCompactionLagMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.MinInsyncReplicas != nil {
+		in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas
+		*out = new(string)
+		**out = **in
+	}
+	if in.Preallocate != nil {
+		in, out := &in.Preallocate, &out.Preallocate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.RetentionBytes != nil {
+		in, out := &in.RetentionBytes, &out.RetentionBytes
+		*out = new(string)
+		**out = **in
+	}
+	if in.RetentionMs != nil {
+		in, out := &in.RetentionMs, &out.RetentionMs
+		*out = new(string)
+		**out = **in
+	}
+	if in.SegmentBytes != nil {
+		in, out := &in.SegmentBytes, &out.SegmentBytes
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicTopicConfigParameters.
+func (in *KafkaTopicTopicConfigParameters) DeepCopy() *KafkaTopicTopicConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaTopicTopicConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUser) DeepCopyInto(out *KafkaUser) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUser.
+func (in *KafkaUser) DeepCopy() *KafkaUser {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUser)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaUser) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserInitParameters) DeepCopyInto(out *KafkaUserInitParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]KafkaUserPermissionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserInitParameters.
+func (in *KafkaUserInitParameters) DeepCopy() *KafkaUserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserList) DeepCopyInto(out *KafkaUserList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KafkaUser, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserList.
+func (in *KafkaUserList) DeepCopy() *KafkaUserList {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KafkaUserList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserObservation) DeepCopyInto(out *KafkaUserObservation) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]KafkaUserPermissionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserObservation.
+func (in *KafkaUserObservation) DeepCopy() *KafkaUserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserParameters) DeepCopyInto(out *KafkaUserParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]KafkaUserPermissionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserParameters.
+func (in *KafkaUserParameters) DeepCopy() *KafkaUserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserPermissionInitParameters) DeepCopyInto(out *KafkaUserPermissionInitParameters) {
+	*out = *in
+	if in.AllowHosts != nil {
+		in, out := &in.AllowHosts, &out.AllowHosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.TopicName != nil {
+		in, out := &in.TopicName, &out.TopicName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserPermissionInitParameters.
+func (in *KafkaUserPermissionInitParameters) DeepCopy() *KafkaUserPermissionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserPermissionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserPermissionObservation) DeepCopyInto(out *KafkaUserPermissionObservation) {
+	*out = *in
+	if in.AllowHosts != nil {
+		in, out := &in.AllowHosts, &out.AllowHosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.TopicName != nil {
+		in, out := &in.TopicName, &out.TopicName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserPermissionObservation.
+func (in *KafkaUserPermissionObservation) DeepCopy() *KafkaUserPermissionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserPermissionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserPermissionParameters) DeepCopyInto(out *KafkaUserPermissionParameters) {
+	*out = *in
+	if in.AllowHosts != nil {
+		in, out := &in.AllowHosts, &out.AllowHosts
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.TopicName != nil {
+		in, out := &in.TopicName, &out.TopicName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserPermissionParameters.
+func (in *KafkaUserPermissionParameters) DeepCopy() *KafkaUserPermissionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserPermissionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserSpec) DeepCopyInto(out *KafkaUserSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserSpec.
+func (in *KafkaUserSpec) DeepCopy() *KafkaUserSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaUserStatus) DeepCopyInto(out *KafkaUserStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUserStatus.
+func (in *KafkaUserStatus) DeepCopy() *KafkaUserStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KafkaUserStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KmipInitParameters) DeepCopyInto(out *KmipInitParameters) {
+	*out = *in
+	if in.ClientCertificate != nil {
+		in, out := &in.ClientCertificate, &out.ClientCertificate
+		*out = new(string)
+		**out = **in
+	}
+	if in.KeyIdentifier != nil {
+		in, out := &in.KeyIdentifier, &out.KeyIdentifier
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ServerCA != nil {
+		in, out := &in.ServerCA, &out.ServerCA
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServerName != nil {
+		in, out := &in.ServerName, &out.ServerName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KmipInitParameters.
+func (in *KmipInitParameters) DeepCopy() *KmipInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KmipInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KmipObservation) DeepCopyInto(out *KmipObservation) {
+	*out = *in
+	if in.ClientCertificate != nil {
+		in, out := &in.ClientCertificate, &out.ClientCertificate
+		*out = new(string)
+		**out = **in
+	}
+	if in.KeyIdentifier != nil {
+		in, out := &in.KeyIdentifier, &out.KeyIdentifier
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ServerCA != nil {
+		in, out := &in.ServerCA, &out.ServerCA
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServerName != nil {
+		in, out := &in.ServerName, &out.ServerName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KmipObservation.
+func (in *KmipObservation) DeepCopy() *KmipObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(KmipObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KmipParameters) DeepCopyInto(out *KmipParameters) {
+	*out = *in
+	if in.ClientCertificate != nil {
+		in, out := &in.ClientCertificate, &out.ClientCertificate
+		*out = new(string)
+		**out = **in
+	}
+	if in.KeyIdentifier != nil {
+		in, out := &in.KeyIdentifier, &out.KeyIdentifier
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ServerCA != nil {
+		in, out := &in.ServerCA, &out.ServerCA
+		*out = new(string)
+		**out = **in
+	}
+	if in.ServerName != nil {
+		in, out := &in.ServerName, &out.ServerName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KmipParameters.
+func (in *KmipParameters) DeepCopy() *KmipParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(KmipParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MLModelInitParameters) DeepCopyInto(out *MLModelInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MLModelInitParameters.
+func (in *MLModelInitParameters) DeepCopy() *MLModelInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MLModelInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MLModelObservation) DeepCopyInto(out *MLModelObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MLModelObservation.
+func (in *MLModelObservation) DeepCopy() *MLModelObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MLModelObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MLModelParameters) DeepCopyInto(out *MLModelParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MLModelParameters.
+func (in *MLModelParameters) DeepCopy() *MLModelParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MLModelParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceWindowInitParameters) DeepCopyInto(out *MaintenanceWindowInitParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowInitParameters.
+func (in *MaintenanceWindowInitParameters) DeepCopy() *MaintenanceWindowInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MaintenanceWindowInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceWindowObservation) DeepCopyInto(out *MaintenanceWindowObservation) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowObservation.
+func (in *MaintenanceWindowObservation) DeepCopy() *MaintenanceWindowObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MaintenanceWindowObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceWindowParameters) DeepCopyInto(out *MaintenanceWindowParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowParameters.
+func (in *MaintenanceWindowParameters) DeepCopy() *MaintenanceWindowParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MaintenanceWindowParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterHostsInitParameters) DeepCopyInto(out *MasterHostsInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterHostsInitParameters.
+func (in *MasterHostsInitParameters) DeepCopy() *MasterHostsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterHostsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterHostsObservation) DeepCopyInto(out *MasterHostsObservation) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterHostsObservation.
+func (in *MasterHostsObservation) DeepCopy() *MasterHostsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterHostsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterHostsParameters) DeepCopyInto(out *MasterHostsParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterHostsParameters.
+func (in *MasterHostsParameters) DeepCopy() *MasterHostsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterHostsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNodeInitParameters) DeepCopyInto(out *MasterNodeInitParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MasterNodeResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNodeInitParameters.
+func (in *MasterNodeInitParameters) DeepCopy() *MasterNodeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterNodeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNodeObservation) DeepCopyInto(out *MasterNodeObservation) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MasterNodeResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNodeObservation.
+func (in *MasterNodeObservation) DeepCopy() *MasterNodeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterNodeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNodeParameters) DeepCopyInto(out *MasterNodeParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MasterNodeResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNodeParameters.
+func (in *MasterNodeParameters) DeepCopy() *MasterNodeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterNodeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNodeResourcesInitParameters) DeepCopyInto(out *MasterNodeResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNodeResourcesInitParameters.
+func (in *MasterNodeResourcesInitParameters) DeepCopy() *MasterNodeResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterNodeResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNodeResourcesObservation) DeepCopyInto(out *MasterNodeResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNodeResourcesObservation.
+func (in *MasterNodeResourcesObservation) DeepCopy() *MasterNodeResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterNodeResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNodeResourcesParameters) DeepCopyInto(out *MasterNodeResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNodeResourcesParameters.
+func (in *MasterNodeResourcesParameters) DeepCopy() *MasterNodeResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterNodeResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterSubclusterInitParameters) DeepCopyInto(out *MasterSubclusterInitParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MasterSubclusterResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSubclusterInitParameters.
+func (in *MasterSubclusterInitParameters) DeepCopy() *MasterSubclusterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterSubclusterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterSubclusterObservation) DeepCopyInto(out *MasterSubclusterObservation) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MasterSubclusterResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSubclusterObservation.
+func (in *MasterSubclusterObservation) DeepCopy() *MasterSubclusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterSubclusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterSubclusterParameters) DeepCopyInto(out *MasterSubclusterParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MasterSubclusterResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSubclusterParameters.
+func (in *MasterSubclusterParameters) DeepCopy() *MasterSubclusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterSubclusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterSubclusterResourcesInitParameters) DeepCopyInto(out *MasterSubclusterResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSubclusterResourcesInitParameters.
+func (in *MasterSubclusterResourcesInitParameters) DeepCopy() *MasterSubclusterResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterSubclusterResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterSubclusterResourcesObservation) DeepCopyInto(out *MasterSubclusterResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSubclusterResourcesObservation.
+func (in *MasterSubclusterResourcesObservation) DeepCopy() *MasterSubclusterResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterSubclusterResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterSubclusterResourcesParameters) DeepCopyInto(out *MasterSubclusterResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSubclusterResourcesParameters.
+func (in *MasterSubclusterResourcesParameters) DeepCopy() *MasterSubclusterResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MasterSubclusterResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MergeTreeInitParameters) DeepCopyInto(out *MergeTreeInitParameters) {
+	*out = *in
+	if in.AllowRemoteFsZeroCopyReplication != nil {
+		in, out := &in.AllowRemoteFsZeroCopyReplication, &out.AllowRemoteFsZeroCopyReplication
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CheckSampleColumnIsCorrect != nil {
+		in, out := &in.CheckSampleColumnIsCorrect, &out.CheckSampleColumnIsCorrect
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CleanupDelayPeriod != nil {
+		in, out := &in.CleanupDelayPeriod, &out.CleanupDelayPeriod
+		*out = new(float64)
+		**out = **in
+	}
+	if in.InactivePartsToDelayInsert != nil {
+		in, out := &in.InactivePartsToDelayInsert, &out.InactivePartsToDelayInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.InactivePartsToThrowInsert != nil {
+		in, out := &in.InactivePartsToThrowInsert, &out.InactivePartsToThrowInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxAvgPartSizeForTooManyParts != nil {
+		in, out := &in.MaxAvgPartSizeForTooManyParts, &out.MaxAvgPartSizeForTooManyParts
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxBytesToMergeAtMaxSpaceInPool != nil {
+		in, out := &in.MaxBytesToMergeAtMaxSpaceInPool, &out.MaxBytesToMergeAtMaxSpaceInPool
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxBytesToMergeAtMinSpaceInPool != nil {
+		in, out := &in.MaxBytesToMergeAtMinSpaceInPool, &out.MaxBytesToMergeAtMinSpaceInPool
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxCleanupDelayPeriod != nil {
+		in, out := &in.MaxCleanupDelayPeriod, &out.MaxCleanupDelayPeriod
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxMergeSelectingSleepMs != nil {
+		in, out := &in.MaxMergeSelectingSleepMs, &out.MaxMergeSelectingSleepMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxNumberOfMergesWithTTLInPool != nil {
+		in, out := &in.MaxNumberOfMergesWithTTLInPool, &out.MaxNumberOfMergesWithTTLInPool
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxPartsInTotal != nil {
+		in, out := &in.MaxPartsInTotal, &out.MaxPartsInTotal
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxReplicatedMergesInQueue != nil {
+		in, out := &in.MaxReplicatedMergesInQueue, &out.MaxReplicatedMergesInQueue
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeMaxBlockSize != nil {
+		in, out := &in.MergeMaxBlockSize, &out.MergeMaxBlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeSelectingSleepMs != nil {
+		in, out := &in.MergeSelectingSleepMs, &out.MergeSelectingSleepMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeWithRecompressionTTLTimeout != nil {
+		in, out := &in.MergeWithRecompressionTTLTimeout, &out.MergeWithRecompressionTTLTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeWithTTLTimeout != nil {
+		in, out := &in.MergeWithTTLTimeout, &out.MergeWithTTLTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinAgeToForceMergeOnPartitionOnly != nil {
+		in, out := &in.MinAgeToForceMergeOnPartitionOnly, &out.MinAgeToForceMergeOnPartitionOnly
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MinAgeToForceMergeSeconds != nil {
+		in, out := &in.MinAgeToForceMergeSeconds, &out.MinAgeToForceMergeSeconds
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinBytesForWidePart != nil {
+		in, out := &in.MinBytesForWidePart, &out.MinBytesForWidePart
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinRowsForWidePart != nil {
+		in, out := &in.MinRowsForWidePart, &out.MinRowsForWidePart
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NumberOfFreeEntriesInPoolToExecuteMutation != nil {
+		in, out := &in.NumberOfFreeEntriesInPoolToExecuteMutation, &out.NumberOfFreeEntriesInPoolToExecuteMutation
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge != nil {
+		in, out := &in.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge, &out.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartsToDelayInsert != nil {
+		in, out := &in.PartsToDelayInsert, &out.PartsToDelayInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartsToThrowInsert != nil {
+		in, out := &in.PartsToThrowInsert, &out.PartsToThrowInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicatedDeduplicationWindow != nil {
+		in, out := &in.ReplicatedDeduplicationWindow, &out.ReplicatedDeduplicationWindow
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicatedDeduplicationWindowSeconds != nil {
+		in, out := &in.ReplicatedDeduplicationWindowSeconds, &out.ReplicatedDeduplicationWindowSeconds
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TTLOnlyDropParts != nil {
+		in, out := &in.TTLOnlyDropParts, &out.TTLOnlyDropParts
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeTreeInitParameters.
+func (in *MergeTreeInitParameters) DeepCopy() *MergeTreeInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MergeTreeInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MergeTreeObservation) DeepCopyInto(out *MergeTreeObservation) {
+	*out = *in
+	if in.AllowRemoteFsZeroCopyReplication != nil {
+		in, out := &in.AllowRemoteFsZeroCopyReplication, &out.AllowRemoteFsZeroCopyReplication
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CheckSampleColumnIsCorrect != nil {
+		in, out := &in.CheckSampleColumnIsCorrect, &out.CheckSampleColumnIsCorrect
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CleanupDelayPeriod != nil {
+		in, out := &in.CleanupDelayPeriod, &out.CleanupDelayPeriod
+		*out = new(float64)
+		**out = **in
+	}
+	if in.InactivePartsToDelayInsert != nil {
+		in, out := &in.InactivePartsToDelayInsert, &out.InactivePartsToDelayInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.InactivePartsToThrowInsert != nil {
+		in, out := &in.InactivePartsToThrowInsert, &out.InactivePartsToThrowInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxAvgPartSizeForTooManyParts != nil {
+		in, out := &in.MaxAvgPartSizeForTooManyParts, &out.MaxAvgPartSizeForTooManyParts
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxBytesToMergeAtMaxSpaceInPool != nil {
+		in, out := &in.MaxBytesToMergeAtMaxSpaceInPool, &out.MaxBytesToMergeAtMaxSpaceInPool
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxBytesToMergeAtMinSpaceInPool != nil {
+		in, out := &in.MaxBytesToMergeAtMinSpaceInPool, &out.MaxBytesToMergeAtMinSpaceInPool
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxCleanupDelayPeriod != nil {
+		in, out := &in.MaxCleanupDelayPeriod, &out.MaxCleanupDelayPeriod
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxMergeSelectingSleepMs != nil {
+		in, out := &in.MaxMergeSelectingSleepMs, &out.MaxMergeSelectingSleepMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxNumberOfMergesWithTTLInPool != nil {
+		in, out := &in.MaxNumberOfMergesWithTTLInPool, &out.MaxNumberOfMergesWithTTLInPool
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxPartsInTotal != nil {
+		in, out := &in.MaxPartsInTotal, &out.MaxPartsInTotal
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxReplicatedMergesInQueue != nil {
+		in, out := &in.MaxReplicatedMergesInQueue, &out.MaxReplicatedMergesInQueue
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeMaxBlockSize != nil {
+		in, out := &in.MergeMaxBlockSize, &out.MergeMaxBlockSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeSelectingSleepMs != nil {
+		in, out := &in.MergeSelectingSleepMs, &out.MergeSelectingSleepMs
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeWithRecompressionTTLTimeout != nil {
+		in, out := &in.MergeWithRecompressionTTLTimeout, &out.MergeWithRecompressionTTLTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MergeWithTTLTimeout != nil {
+		in, out := &in.MergeWithTTLTimeout, &out.MergeWithTTLTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinAgeToForceMergeOnPartitionOnly != nil {
+		in, out := &in.MinAgeToForceMergeOnPartitionOnly, &out.MinAgeToForceMergeOnPartitionOnly
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MinAgeToForceMergeSeconds != nil {
+		in, out := &in.MinAgeToForceMergeSeconds, &out.MinAgeToForceMergeSeconds
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinBytesForWidePart != nil {
+		in, out := &in.MinBytesForWidePart, &out.MinBytesForWidePart
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MinRowsForWidePart != nil {
+		in, out := &in.MinRowsForWidePart, &out.MinRowsForWidePart
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NumberOfFreeEntriesInPoolToExecuteMutation != nil {
+		in, out := &in.NumberOfFreeEntriesInPoolToExecuteMutation, &out.NumberOfFreeEntriesInPoolToExecuteMutation
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge != nil {
+		in, out := &in.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge, &out.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartsToDelayInsert != nil {
+		in, out := &in.PartsToDelayInsert, &out.PartsToDelayInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartsToThrowInsert != nil {
+		in, out := &in.PartsToThrowInsert, &out.PartsToThrowInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicatedDeduplicationWindow != nil {
+		in, out := &in.ReplicatedDeduplicationWindow, &out.ReplicatedDeduplicationWindow
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicatedDeduplicationWindowSeconds != nil {
+		in, out := &in.ReplicatedDeduplicationWindowSeconds, &out.ReplicatedDeduplicationWindowSeconds
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TTLOnlyDropParts != nil {
+		in, out := &in.TTLOnlyDropParts, &out.TTLOnlyDropParts
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeTreeObservation.
+func (in *MergeTreeObservation) DeepCopy() *MergeTreeObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MergeTreeObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MergeTreeParameters) DeepCopyInto(out *MergeTreeParameters) { + *out = *in + if in.AllowRemoteFsZeroCopyReplication != nil { + in, out := &in.AllowRemoteFsZeroCopyReplication, &out.AllowRemoteFsZeroCopyReplication + *out = new(bool) + **out = **in + } + if in.CheckSampleColumnIsCorrect != nil { + in, out := &in.CheckSampleColumnIsCorrect, &out.CheckSampleColumnIsCorrect + *out = new(bool) + **out = **in + } + if in.CleanupDelayPeriod != nil { + in, out := &in.CleanupDelayPeriod, &out.CleanupDelayPeriod + *out = new(float64) + **out = **in + } + if in.InactivePartsToDelayInsert != nil { + in, out := &in.InactivePartsToDelayInsert, &out.InactivePartsToDelayInsert + *out = new(float64) + **out = **in + } + if in.InactivePartsToThrowInsert != nil { + in, out := &in.InactivePartsToThrowInsert, &out.InactivePartsToThrowInsert + *out = new(float64) + **out = **in + } + if in.MaxAvgPartSizeForTooManyParts != nil { + in, out := &in.MaxAvgPartSizeForTooManyParts, &out.MaxAvgPartSizeForTooManyParts + *out = new(float64) + **out = **in + } + if in.MaxBytesToMergeAtMaxSpaceInPool != nil { + in, out := &in.MaxBytesToMergeAtMaxSpaceInPool, &out.MaxBytesToMergeAtMaxSpaceInPool + *out = new(float64) + **out = **in + } + if in.MaxBytesToMergeAtMinSpaceInPool != nil { + in, out := &in.MaxBytesToMergeAtMinSpaceInPool, &out.MaxBytesToMergeAtMinSpaceInPool + *out = new(float64) + **out = **in + } + if in.MaxCleanupDelayPeriod != nil { + in, out := &in.MaxCleanupDelayPeriod, &out.MaxCleanupDelayPeriod + *out = new(float64) + **out = **in + } + if in.MaxMergeSelectingSleepMs != nil { + in, out := &in.MaxMergeSelectingSleepMs, &out.MaxMergeSelectingSleepMs + *out = new(float64) + **out = **in + } + if in.MaxNumberOfMergesWithTTLInPool != nil { + in, out := &in.MaxNumberOfMergesWithTTLInPool, &out.MaxNumberOfMergesWithTTLInPool + *out = new(float64) + **out = **in + } + if in.MaxPartsInTotal != nil { + in, out := &in.MaxPartsInTotal, &out.MaxPartsInTotal + *out = new(float64) + **out = **in + } + if in.MaxReplicatedMergesInQueue != nil { + in, out := &in.MaxReplicatedMergesInQueue, &out.MaxReplicatedMergesInQueue + *out = new(float64) + **out = **in + } + if in.MergeMaxBlockSize != nil { + in, out := &in.MergeMaxBlockSize, &out.MergeMaxBlockSize + *out = new(float64) + **out = **in + } + if in.MergeSelectingSleepMs != nil { + in, out := &in.MergeSelectingSleepMs, &out.MergeSelectingSleepMs + *out = new(float64) + **out = **in + } + if in.MergeWithRecompressionTTLTimeout != nil { + in, out := &in.MergeWithRecompressionTTLTimeout, &out.MergeWithRecompressionTTLTimeout + *out = new(float64) + **out = **in + } + if in.MergeWithTTLTimeout != nil { + in, out := &in.MergeWithTTLTimeout, &out.MergeWithTTLTimeout + *out = new(float64) + **out = **in + } + if in.MinAgeToForceMergeOnPartitionOnly != nil { + in, out := &in.MinAgeToForceMergeOnPartitionOnly, &out.MinAgeToForceMergeOnPartitionOnly + *out = new(bool) + **out = **in + } + if in.MinAgeToForceMergeSeconds != nil { + in, out := &in.MinAgeToForceMergeSeconds, &out.MinAgeToForceMergeSeconds + *out = new(float64) + **out = **in + } + if in.MinBytesForWidePart != nil { + in, out := &in.MinBytesForWidePart, &out.MinBytesForWidePart + *out = new(float64) + **out = **in + } + if in.MinRowsForWidePart != nil { + in, out := &in.MinRowsForWidePart, &out.MinRowsForWidePart + *out = new(float64) + **out = **in + } + if in.NumberOfFreeEntriesInPoolToExecuteMutation != nil { + in, out := &in.NumberOfFreeEntriesInPoolToExecuteMutation, 
+		*out = new(float64)
+		**out = **in
+	}
+	if in.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge != nil {
+		in, out := &in.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge, &out.NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartsToDelayInsert != nil {
+		in, out := &in.PartsToDelayInsert, &out.PartsToDelayInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PartsToThrowInsert != nil {
+		in, out := &in.PartsToThrowInsert, &out.PartsToThrowInsert
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicatedDeduplicationWindow != nil {
+		in, out := &in.ReplicatedDeduplicationWindow, &out.ReplicatedDeduplicationWindow
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicatedDeduplicationWindowSeconds != nil {
+		in, out := &in.ReplicatedDeduplicationWindowSeconds, &out.ReplicatedDeduplicationWindowSeconds
+		*out = new(float64)
+		**out = **in
+	}
+	if in.TTLOnlyDropParts != nil {
+		in, out := &in.TTLOnlyDropParts, &out.TTLOnlyDropParts
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeTreeParameters.
+func (in *MergeTreeParameters) DeepCopy() *MergeTreeParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MergeTreeParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongocfgInitParameters) DeepCopyInto(out *MongocfgInitParameters) {
+	*out = *in
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]NetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OperationProfiling != nil {
+		in, out := &in.OperationProfiling, &out.OperationProfiling
+		*out = make([]OperationProfilingInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = make([]StorageInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongocfgInitParameters.
+func (in *MongocfgInitParameters) DeepCopy() *MongocfgInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongocfgInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongocfgObservation) DeepCopyInto(out *MongocfgObservation) {
+	*out = *in
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]NetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OperationProfiling != nil {
+		in, out := &in.OperationProfiling, &out.OperationProfiling
+		*out = make([]OperationProfilingObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = make([]StorageObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongocfgObservation.
+func (in *MongocfgObservation) DeepCopy() *MongocfgObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongocfgObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongocfgParameters) DeepCopyInto(out *MongocfgParameters) {
+	*out = *in
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]NetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OperationProfiling != nil {
+		in, out := &in.OperationProfiling, &out.OperationProfiling
+		*out = make([]OperationProfilingParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = make([]StorageParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongocfgParameters.
+func (in *MongocfgParameters) DeepCopy() *MongocfgParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongocfgParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodInitParameters) DeepCopyInto(out *MongodInitParameters) {
+	*out = *in
+	if in.AuditLog != nil {
+		in, out := &in.AuditLog, &out.AuditLog
+		*out = make([]AuditLogInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]MongodNetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OperationProfiling != nil {
+		in, out := &in.OperationProfiling, &out.OperationProfiling
+		*out = make([]MongodOperationProfilingInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Security != nil {
+		in, out := &in.Security, &out.Security
+		*out = make([]SecurityInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SetParameter != nil {
+		in, out := &in.SetParameter, &out.SetParameter
+		*out = make([]SetParameterInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = make([]MongodStorageInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodInitParameters.
+func (in *MongodInitParameters) DeepCopy() *MongodInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodNetInitParameters) DeepCopyInto(out *MongodNetInitParameters) {
+	*out = *in
+	if in.Compressors != nil {
+		in, out := &in.Compressors, &out.Compressors
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodNetInitParameters.
+func (in *MongodNetInitParameters) DeepCopy() *MongodNetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodNetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodNetObservation) DeepCopyInto(out *MongodNetObservation) {
+	*out = *in
+	if in.Compressors != nil {
+		in, out := &in.Compressors, &out.Compressors
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodNetObservation.
+func (in *MongodNetObservation) DeepCopy() *MongodNetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodNetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodNetParameters) DeepCopyInto(out *MongodNetParameters) {
+	*out = *in
+	if in.Compressors != nil {
+		in, out := &in.Compressors, &out.Compressors
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodNetParameters.
+func (in *MongodNetParameters) DeepCopy() *MongodNetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodNetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodObservation) DeepCopyInto(out *MongodObservation) {
+	*out = *in
+	if in.AuditLog != nil {
+		in, out := &in.AuditLog, &out.AuditLog
+		*out = make([]AuditLogObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]MongodNetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OperationProfiling != nil {
+		in, out := &in.OperationProfiling, &out.OperationProfiling
+		*out = make([]MongodOperationProfilingObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Security != nil {
+		in, out := &in.Security, &out.Security
+		*out = make([]SecurityObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SetParameter != nil {
+		in, out := &in.SetParameter, &out.SetParameter
+		*out = make([]SetParameterObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = make([]MongodStorageObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodObservation.
+func (in *MongodObservation) DeepCopy() *MongodObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodOperationProfilingInitParameters) DeepCopyInto(out *MongodOperationProfilingInitParameters) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowOpSampleRate != nil {
+		in, out := &in.SlowOpSampleRate, &out.SlowOpSampleRate
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SlowOpThreshold != nil {
+		in, out := &in.SlowOpThreshold, &out.SlowOpThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodOperationProfilingInitParameters.
+func (in *MongodOperationProfilingInitParameters) DeepCopy() *MongodOperationProfilingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodOperationProfilingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodOperationProfilingObservation) DeepCopyInto(out *MongodOperationProfilingObservation) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowOpSampleRate != nil {
+		in, out := &in.SlowOpSampleRate, &out.SlowOpSampleRate
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SlowOpThreshold != nil {
+		in, out := &in.SlowOpThreshold, &out.SlowOpThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodOperationProfilingObservation.
+func (in *MongodOperationProfilingObservation) DeepCopy() *MongodOperationProfilingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodOperationProfilingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodOperationProfilingParameters) DeepCopyInto(out *MongodOperationProfilingParameters) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowOpSampleRate != nil {
+		in, out := &in.SlowOpSampleRate, &out.SlowOpSampleRate
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SlowOpThreshold != nil {
+		in, out := &in.SlowOpThreshold, &out.SlowOpThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodOperationProfilingParameters.
+func (in *MongodOperationProfilingParameters) DeepCopy() *MongodOperationProfilingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodOperationProfilingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodParameters) DeepCopyInto(out *MongodParameters) {
+	*out = *in
+	if in.AuditLog != nil {
+		in, out := &in.AuditLog, &out.AuditLog
+		*out = make([]AuditLogParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]MongodNetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OperationProfiling != nil {
+		in, out := &in.OperationProfiling, &out.OperationProfiling
+		*out = make([]MongodOperationProfilingParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Security != nil {
+		in, out := &in.Security, &out.Security
+		*out = make([]SecurityParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SetParameter != nil {
+		in, out := &in.SetParameter, &out.SetParameter
+		*out = make([]SetParameterParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = make([]MongodStorageParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodParameters.
+func (in *MongodParameters) DeepCopy() *MongodParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodStorageInitParameters) DeepCopyInto(out *MongodStorageInitParameters) {
+	*out = *in
+	if in.Journal != nil {
+		in, out := &in.Journal, &out.Journal
+		*out = make([]JournalInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.WiredTiger != nil {
+		in, out := &in.WiredTiger, &out.WiredTiger
+		*out = make([]StorageWiredTigerInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodStorageInitParameters.
+func (in *MongodStorageInitParameters) DeepCopy() *MongodStorageInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodStorageInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodStorageObservation) DeepCopyInto(out *MongodStorageObservation) {
+	*out = *in
+	if in.Journal != nil {
+		in, out := &in.Journal, &out.Journal
+		*out = make([]JournalObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.WiredTiger != nil {
+		in, out := &in.WiredTiger, &out.WiredTiger
+		*out = make([]StorageWiredTigerObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodStorageObservation.
+func (in *MongodStorageObservation) DeepCopy() *MongodStorageObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodStorageObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodStorageParameters) DeepCopyInto(out *MongodStorageParameters) {
+	*out = *in
+	if in.Journal != nil {
+		in, out := &in.Journal, &out.Journal
+		*out = make([]JournalParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.WiredTiger != nil {
+		in, out := &in.WiredTiger, &out.WiredTiger
+		*out = make([]StorageWiredTigerParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodStorageParameters.
+func (in *MongodStorageParameters) DeepCopy() *MongodStorageParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodStorageParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbCluster) DeepCopyInto(out *MongodbCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbCluster.
+func (in *MongodbCluster) DeepCopy() *MongodbCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MongodbCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterDatabaseInitParameters) DeepCopyInto(out *MongodbClusterDatabaseInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterDatabaseInitParameters.
+func (in *MongodbClusterDatabaseInitParameters) DeepCopy() *MongodbClusterDatabaseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterDatabaseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterDatabaseObservation) DeepCopyInto(out *MongodbClusterDatabaseObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterDatabaseObservation.
+func (in *MongodbClusterDatabaseObservation) DeepCopy() *MongodbClusterDatabaseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterDatabaseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterDatabaseParameters) DeepCopyInto(out *MongodbClusterDatabaseParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterDatabaseParameters.
+func (in *MongodbClusterDatabaseParameters) DeepCopy() *MongodbClusterDatabaseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterDatabaseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterHostInitParameters) DeepCopyInto(out *MongodbClusterHostInitParameters) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.HostParameters != nil {
+		in, out := &in.HostParameters, &out.HostParameters
+		*out = make([]HostParametersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.ShardName != nil {
+		in, out := &in.ShardName, &out.ShardName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterHostInitParameters.
+func (in *MongodbClusterHostInitParameters) DeepCopy() *MongodbClusterHostInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterHostInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterHostObservation) DeepCopyInto(out *MongodbClusterHostObservation) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.HostParameters != nil {
+		in, out := &in.HostParameters, &out.HostParameters
+		*out = make([]HostParametersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.ShardName != nil {
+		in, out := &in.ShardName, &out.ShardName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterHostObservation.
+func (in *MongodbClusterHostObservation) DeepCopy() *MongodbClusterHostObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterHostObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterHostParameters) DeepCopyInto(out *MongodbClusterHostParameters) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.HostParameters != nil {
+		in, out := &in.HostParameters, &out.HostParameters
+		*out = make([]HostParametersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Role != nil {
+		in, out := &in.Role, &out.Role
+		*out = new(string)
+		**out = **in
+	}
+	if in.ShardName != nil {
+		in, out := &in.ShardName, &out.ShardName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.ZoneID != nil {
+		in, out := &in.ZoneID, &out.ZoneID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterHostParameters.
+func (in *MongodbClusterHostParameters) DeepCopy() *MongodbClusterHostParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterHostParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterInitParameters) DeepCopyInto(out *MongodbClusterInitParameters) {
+	*out = *in
+	if in.ClusterConfig != nil {
+		in, out := &in.ClusterConfig, &out.ClusterConfig
+		*out = make([]ClusterConfigInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = make([]MongodbClusterDatabaseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskSizeAutoscalingMongocfg != nil {
+		in, out := &in.DiskSizeAutoscalingMongocfg, &out.DiskSizeAutoscalingMongocfg
+		*out = make([]DiskSizeAutoscalingMongocfgInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongod != nil {
+		in, out := &in.DiskSizeAutoscalingMongod, &out.DiskSizeAutoscalingMongod
+		*out = make([]DiskSizeAutoscalingMongodInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongoinfra != nil {
+		in, out := &in.DiskSizeAutoscalingMongoinfra, &out.DiskSizeAutoscalingMongoinfra
+		*out = make([]DiskSizeAutoscalingMongoinfraInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongos != nil {
+		in, out := &in.DiskSizeAutoscalingMongos, &out.DiskSizeAutoscalingMongos
+		*out = make([]DiskSizeAutoscalingMongosInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]MongodbClusterHostInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]MongodbClusterMaintenanceWindowInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MongodbClusterResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongocfg != nil {
+		in, out := &in.ResourcesMongocfg, &out.ResourcesMongocfg
+		*out = make([]ResourcesMongocfgInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongod != nil {
+		in, out := &in.ResourcesMongod, &out.ResourcesMongod
+		*out = make([]ResourcesMongodInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongoinfra != nil {
+		in, out := &in.ResourcesMongoinfra, &out.ResourcesMongoinfra
+		*out = make([]ResourcesMongoinfraInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongos != nil {
+		in, out := &in.ResourcesMongos, &out.ResourcesMongos
+		*out = make([]ResourcesMongosInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Restore != nil {
+		in, out := &in.Restore, &out.Restore
+		*out = make([]RestoreInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]MongodbClusterUserInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterInitParameters.
+func (in *MongodbClusterInitParameters) DeepCopy() *MongodbClusterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterList) DeepCopyInto(out *MongodbClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MongodbCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterList.
+func (in *MongodbClusterList) DeepCopy() *MongodbClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MongodbClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterMaintenanceWindowInitParameters) DeepCopyInto(out *MongodbClusterMaintenanceWindowInitParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterMaintenanceWindowInitParameters.
+func (in *MongodbClusterMaintenanceWindowInitParameters) DeepCopy() *MongodbClusterMaintenanceWindowInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterMaintenanceWindowInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterMaintenanceWindowObservation) DeepCopyInto(out *MongodbClusterMaintenanceWindowObservation) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterMaintenanceWindowObservation.
+func (in *MongodbClusterMaintenanceWindowObservation) DeepCopy() *MongodbClusterMaintenanceWindowObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterMaintenanceWindowObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterMaintenanceWindowParameters) DeepCopyInto(out *MongodbClusterMaintenanceWindowParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterMaintenanceWindowParameters.
+func (in *MongodbClusterMaintenanceWindowParameters) DeepCopy() *MongodbClusterMaintenanceWindowParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterMaintenanceWindowParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterObservation) DeepCopyInto(out *MongodbClusterObservation) {
+	*out = *in
+	if in.ClusterConfig != nil {
+		in, out := &in.ClusterConfig, &out.ClusterConfig
+		*out = make([]ClusterConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = make([]MongodbClusterDatabaseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskSizeAutoscalingMongocfg != nil {
+		in, out := &in.DiskSizeAutoscalingMongocfg, &out.DiskSizeAutoscalingMongocfg
+		*out = make([]DiskSizeAutoscalingMongocfgObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongod != nil {
+		in, out := &in.DiskSizeAutoscalingMongod, &out.DiskSizeAutoscalingMongod
+		*out = make([]DiskSizeAutoscalingMongodObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongoinfra != nil {
+		in, out := &in.DiskSizeAutoscalingMongoinfra, &out.DiskSizeAutoscalingMongoinfra
+		*out = make([]DiskSizeAutoscalingMongoinfraObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongos != nil {
+		in, out := &in.DiskSizeAutoscalingMongos, &out.DiskSizeAutoscalingMongos
+		*out = make([]DiskSizeAutoscalingMongosObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]MongodbClusterHostObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]MongodbClusterMaintenanceWindowObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MongodbClusterResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongocfg != nil {
+		in, out := &in.ResourcesMongocfg, &out.ResourcesMongocfg
+		*out = make([]ResourcesMongocfgObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongod != nil {
+		in, out := &in.ResourcesMongod, &out.ResourcesMongod
+		*out = make([]ResourcesMongodObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongoinfra != nil {
+		in, out := &in.ResourcesMongoinfra, &out.ResourcesMongoinfra
+		*out = make([]ResourcesMongoinfraObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongos != nil {
+		in, out := &in.ResourcesMongos, &out.ResourcesMongos
+		*out = make([]ResourcesMongosObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Restore != nil {
+		in, out := &in.Restore, &out.Restore
+		*out = make([]RestoreObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Sharded != nil {
+		in, out := &in.Sharded, &out.Sharded
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]MongodbClusterUserObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterObservation.
+func (in *MongodbClusterObservation) DeepCopy() *MongodbClusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterParameters) DeepCopyInto(out *MongodbClusterParameters) {
+	*out = *in
+	if in.ClusterConfig != nil {
+		in, out := &in.ClusterConfig, &out.ClusterConfig
+		*out = make([]ClusterConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = make([]MongodbClusterDatabaseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskSizeAutoscalingMongocfg != nil {
+		in, out := &in.DiskSizeAutoscalingMongocfg, &out.DiskSizeAutoscalingMongocfg
+		*out = make([]DiskSizeAutoscalingMongocfgParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongod != nil {
+		in, out := &in.DiskSizeAutoscalingMongod, &out.DiskSizeAutoscalingMongod
+		*out = make([]DiskSizeAutoscalingMongodParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongoinfra != nil {
+		in, out := &in.DiskSizeAutoscalingMongoinfra, &out.DiskSizeAutoscalingMongoinfra
+		*out = make([]DiskSizeAutoscalingMongoinfraParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscalingMongos != nil {
+		in, out := &in.DiskSizeAutoscalingMongos, &out.DiskSizeAutoscalingMongos
+		*out = make([]DiskSizeAutoscalingMongosParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]MongodbClusterHostParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]MongodbClusterMaintenanceWindowParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MongodbClusterResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongocfg != nil {
+		in, out := &in.ResourcesMongocfg, &out.ResourcesMongocfg
+		*out = make([]ResourcesMongocfgParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongod != nil {
+		in, out := &in.ResourcesMongod, &out.ResourcesMongod
+		*out = make([]ResourcesMongodParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongoinfra != nil {
+		in, out := &in.ResourcesMongoinfra, &out.ResourcesMongoinfra
+		*out = make([]ResourcesMongoinfraParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourcesMongos != nil {
+		in, out := &in.ResourcesMongos, &out.ResourcesMongos
+		*out = make([]ResourcesMongosParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Restore != nil {
+		in, out := &in.Restore, &out.Restore
+		*out = make([]RestoreParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]MongodbClusterUserParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterParameters.
+func (in *MongodbClusterParameters) DeepCopy() *MongodbClusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterResourcesInitParameters) DeepCopyInto(out *MongodbClusterResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterResourcesInitParameters.
+func (in *MongodbClusterResourcesInitParameters) DeepCopy() *MongodbClusterResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterResourcesObservation) DeepCopyInto(out *MongodbClusterResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterResourcesObservation.
+func (in *MongodbClusterResourcesObservation) DeepCopy() *MongodbClusterResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterResourcesParameters) DeepCopyInto(out *MongodbClusterResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterResourcesParameters.
+func (in *MongodbClusterResourcesParameters) DeepCopy() *MongodbClusterResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterSpec) DeepCopyInto(out *MongodbClusterSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterSpec.
+func (in *MongodbClusterSpec) DeepCopy() *MongodbClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterStatus) DeepCopyInto(out *MongodbClusterStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterStatus.
+func (in *MongodbClusterStatus) DeepCopy() *MongodbClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterUserInitParameters) DeepCopyInto(out *MongodbClusterUserInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MongodbClusterUserPermissionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterUserInitParameters.
+func (in *MongodbClusterUserInitParameters) DeepCopy() *MongodbClusterUserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterUserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterUserObservation) DeepCopyInto(out *MongodbClusterUserObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MongodbClusterUserPermissionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterUserObservation.
+func (in *MongodbClusterUserObservation) DeepCopy() *MongodbClusterUserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterUserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterUserParameters) DeepCopyInto(out *MongodbClusterUserParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MongodbClusterUserPermissionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterUserParameters.
+func (in *MongodbClusterUserParameters) DeepCopy() *MongodbClusterUserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterUserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterUserPermissionInitParameters) DeepCopyInto(out *MongodbClusterUserPermissionInitParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterUserPermissionInitParameters.
+func (in *MongodbClusterUserPermissionInitParameters) DeepCopy() *MongodbClusterUserPermissionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterUserPermissionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterUserPermissionObservation) DeepCopyInto(out *MongodbClusterUserPermissionObservation) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterUserPermissionObservation.
+func (in *MongodbClusterUserPermissionObservation) DeepCopy() *MongodbClusterUserPermissionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterUserPermissionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbClusterUserPermissionParameters) DeepCopyInto(out *MongodbClusterUserPermissionParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbClusterUserPermissionParameters.
+func (in *MongodbClusterUserPermissionParameters) DeepCopy() *MongodbClusterUserPermissionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbClusterUserPermissionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabase) DeepCopyInto(out *MongodbDatabase) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabase.
+func (in *MongodbDatabase) DeepCopy() *MongodbDatabase {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabase)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MongodbDatabase) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabaseInitParameters) DeepCopyInto(out *MongodbDatabaseInitParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseInitParameters.
+func (in *MongodbDatabaseInitParameters) DeepCopy() *MongodbDatabaseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabaseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabaseList) DeepCopyInto(out *MongodbDatabaseList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MongodbDatabase, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseList.
+func (in *MongodbDatabaseList) DeepCopy() *MongodbDatabaseList {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabaseList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MongodbDatabaseList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabaseObservation) DeepCopyInto(out *MongodbDatabaseObservation) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseObservation.
+func (in *MongodbDatabaseObservation) DeepCopy() *MongodbDatabaseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabaseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabaseParameters) DeepCopyInto(out *MongodbDatabaseParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseParameters.
+func (in *MongodbDatabaseParameters) DeepCopy() *MongodbDatabaseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabaseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabaseSpec) DeepCopyInto(out *MongodbDatabaseSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseSpec.
+func (in *MongodbDatabaseSpec) DeepCopy() *MongodbDatabaseSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabaseSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbDatabaseStatus) DeepCopyInto(out *MongodbDatabaseStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseStatus.
+func (in *MongodbDatabaseStatus) DeepCopy() *MongodbDatabaseStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbDatabaseStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUser) DeepCopyInto(out *MongodbUser) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUser.
+func (in *MongodbUser) DeepCopy() *MongodbUser {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUser)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MongodbUser) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserInitParameters) DeepCopyInto(out *MongodbUserInitParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MongodbUserPermissionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserInitParameters.
+func (in *MongodbUserInitParameters) DeepCopy() *MongodbUserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserList) DeepCopyInto(out *MongodbUserList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MongodbUser, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserList.
+func (in *MongodbUserList) DeepCopy() *MongodbUserList {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MongodbUserList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserObservation) DeepCopyInto(out *MongodbUserObservation) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MongodbUserPermissionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserObservation.
+func (in *MongodbUserObservation) DeepCopy() *MongodbUserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserParameters) DeepCopyInto(out *MongodbUserParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MongodbUserPermissionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserParameters.
+func (in *MongodbUserParameters) DeepCopy() *MongodbUserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserPermissionInitParameters) DeepCopyInto(out *MongodbUserPermissionInitParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserPermissionInitParameters.
+func (in *MongodbUserPermissionInitParameters) DeepCopy() *MongodbUserPermissionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserPermissionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserPermissionObservation) DeepCopyInto(out *MongodbUserPermissionObservation) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserPermissionObservation.
+func (in *MongodbUserPermissionObservation) DeepCopy() *MongodbUserPermissionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserPermissionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserPermissionParameters) DeepCopyInto(out *MongodbUserPermissionParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserPermissionParameters.
+func (in *MongodbUserPermissionParameters) DeepCopy() *MongodbUserPermissionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserPermissionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserSpec) DeepCopyInto(out *MongodbUserSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserSpec.
+func (in *MongodbUserSpec) DeepCopy() *MongodbUserSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongodbUserStatus) DeepCopyInto(out *MongodbUserStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbUserStatus.
+func (in *MongodbUserStatus) DeepCopy() *MongodbUserStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MongodbUserStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongosInitParameters) DeepCopyInto(out *MongosInitParameters) {
+	*out = *in
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]MongosNetInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongosInitParameters.
+func (in *MongosInitParameters) DeepCopy() *MongosInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongosInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongosNetInitParameters) DeepCopyInto(out *MongosNetInitParameters) {
+	*out = *in
+	if in.Compressors != nil {
+		in, out := &in.Compressors, &out.Compressors
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongosNetInitParameters.
+func (in *MongosNetInitParameters) DeepCopy() *MongosNetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongosNetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongosNetObservation) DeepCopyInto(out *MongosNetObservation) {
+	*out = *in
+	if in.Compressors != nil {
+		in, out := &in.Compressors, &out.Compressors
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongosNetObservation.
+func (in *MongosNetObservation) DeepCopy() *MongosNetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongosNetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongosNetParameters) DeepCopyInto(out *MongosNetParameters) {
+	*out = *in
+	if in.Compressors != nil {
+		in, out := &in.Compressors, &out.Compressors
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongosNetParameters.
+func (in *MongosNetParameters) DeepCopy() *MongosNetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongosNetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongosObservation) DeepCopyInto(out *MongosObservation) {
+	*out = *in
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]MongosNetObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongosObservation.
+func (in *MongosObservation) DeepCopy() *MongosObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MongosObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MongosParameters) DeepCopyInto(out *MongosParameters) {
+	*out = *in
+	if in.Net != nil {
+		in, out := &in.Net, &out.Net
+		*out = make([]MongosNetParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongosParameters.
+func (in *MongosParameters) DeepCopy() *MongosParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MongosParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLCluster) DeepCopyInto(out *MySQLCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLCluster.
+func (in *MySQLCluster) DeepCopy() *MySQLCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MySQLCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterAccessInitParameters) DeepCopyInto(out *MySQLClusterAccessInitParameters) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterAccessInitParameters.
+func (in *MySQLClusterAccessInitParameters) DeepCopy() *MySQLClusterAccessInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterAccessInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterAccessObservation) DeepCopyInto(out *MySQLClusterAccessObservation) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterAccessObservation.
+func (in *MySQLClusterAccessObservation) DeepCopy() *MySQLClusterAccessObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterAccessObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterAccessParameters) DeepCopyInto(out *MySQLClusterAccessParameters) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterAccessParameters.
+func (in *MySQLClusterAccessParameters) DeepCopy() *MySQLClusterAccessParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterAccessParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterBackupWindowStartInitParameters) DeepCopyInto(out *MySQLClusterBackupWindowStartInitParameters) {
+	*out = *in
+	if in.Hours != nil {
+		in, out := &in.Hours, &out.Hours
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minutes != nil {
+		in, out := &in.Minutes, &out.Minutes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterBackupWindowStartInitParameters.
+func (in *MySQLClusterBackupWindowStartInitParameters) DeepCopy() *MySQLClusterBackupWindowStartInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterBackupWindowStartInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterBackupWindowStartObservation) DeepCopyInto(out *MySQLClusterBackupWindowStartObservation) {
+	*out = *in
+	if in.Hours != nil {
+		in, out := &in.Hours, &out.Hours
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minutes != nil {
+		in, out := &in.Minutes, &out.Minutes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterBackupWindowStartObservation.
+func (in *MySQLClusterBackupWindowStartObservation) DeepCopy() *MySQLClusterBackupWindowStartObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterBackupWindowStartObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterBackupWindowStartParameters) DeepCopyInto(out *MySQLClusterBackupWindowStartParameters) {
+	*out = *in
+	if in.Hours != nil {
+		in, out := &in.Hours, &out.Hours
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minutes != nil {
+		in, out := &in.Minutes, &out.Minutes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterBackupWindowStartParameters.
+func (in *MySQLClusterBackupWindowStartParameters) DeepCopy() *MySQLClusterBackupWindowStartParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterBackupWindowStartParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterDatabaseInitParameters) DeepCopyInto(out *MySQLClusterDatabaseInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterDatabaseInitParameters.
+func (in *MySQLClusterDatabaseInitParameters) DeepCopy() *MySQLClusterDatabaseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterDatabaseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterDatabaseObservation) DeepCopyInto(out *MySQLClusterDatabaseObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterDatabaseObservation.
+func (in *MySQLClusterDatabaseObservation) DeepCopy() *MySQLClusterDatabaseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterDatabaseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterDatabaseParameters) DeepCopyInto(out *MySQLClusterDatabaseParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterDatabaseParameters.
+func (in *MySQLClusterDatabaseParameters) DeepCopy() *MySQLClusterDatabaseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterDatabaseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterHostInitParameters) DeepCopyInto(out *MySQLClusterHostInitParameters) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupPriority != nil {
+		in, out := &in.BackupPriority, &out.BackupPriority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicationSourceName != nil {
+		in, out := &in.ReplicationSourceName, &out.ReplicationSourceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterHostInitParameters.
+func (in *MySQLClusterHostInitParameters) DeepCopy() *MySQLClusterHostInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterHostInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterHostObservation) DeepCopyInto(out *MySQLClusterHostObservation) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupPriority != nil {
+		in, out := &in.BackupPriority, &out.BackupPriority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicationSource != nil {
+		in, out := &in.ReplicationSource, &out.ReplicationSource
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplicationSourceName != nil {
+		in, out := &in.ReplicationSourceName, &out.ReplicationSourceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterHostObservation.
+func (in *MySQLClusterHostObservation) DeepCopy() *MySQLClusterHostObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterHostObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterHostParameters) DeepCopyInto(out *MySQLClusterHostParameters) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupPriority != nil {
+		in, out := &in.BackupPriority, &out.BackupPriority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReplicationSourceName != nil {
+		in, out := &in.ReplicationSourceName, &out.ReplicationSourceName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterHostParameters.
+func (in *MySQLClusterHostParameters) DeepCopy() *MySQLClusterHostParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterHostParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterInitParameters) DeepCopyInto(out *MySQLClusterInitParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]MySQLClusterAccessInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AllowRegenerationHost != nil {
+		in, out := &in.AllowRegenerationHost, &out.AllowRegenerationHost
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]MySQLClusterBackupWindowStartInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = make([]MySQLClusterDatabaseInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]MySQLClusterHostInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostGroupIds != nil {
+		in, out := &in.HostGroupIds, &out.HostGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]MySQLClusterMaintenanceWindowInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLConfig != nil {
+		in, out := &in.MySQLConfig, &out.MySQLConfig
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]MySQLClusterPerformanceDiagnosticsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MySQLClusterResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Restore != nil {
+		in, out := &in.Restore, &out.Restore
+		*out = make([]MySQLClusterRestoreInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]MySQLClusterUserInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterInitParameters.
+func (in *MySQLClusterInitParameters) DeepCopy() *MySQLClusterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterList) DeepCopyInto(out *MySQLClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MySQLCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterList.
+func (in *MySQLClusterList) DeepCopy() *MySQLClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MySQLClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterMaintenanceWindowInitParameters) DeepCopyInto(out *MySQLClusterMaintenanceWindowInitParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterMaintenanceWindowInitParameters.
+func (in *MySQLClusterMaintenanceWindowInitParameters) DeepCopy() *MySQLClusterMaintenanceWindowInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterMaintenanceWindowInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterMaintenanceWindowObservation) DeepCopyInto(out *MySQLClusterMaintenanceWindowObservation) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterMaintenanceWindowObservation.
+func (in *MySQLClusterMaintenanceWindowObservation) DeepCopy() *MySQLClusterMaintenanceWindowObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterMaintenanceWindowObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterMaintenanceWindowParameters) DeepCopyInto(out *MySQLClusterMaintenanceWindowParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterMaintenanceWindowParameters.
+func (in *MySQLClusterMaintenanceWindowParameters) DeepCopy() *MySQLClusterMaintenanceWindowParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterMaintenanceWindowParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterObservation) DeepCopyInto(out *MySQLClusterObservation) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]MySQLClusterAccessObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AllowRegenerationHost != nil {
+		in, out := &in.AllowRegenerationHost, &out.AllowRegenerationHost
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]MySQLClusterBackupWindowStartObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = make([]MySQLClusterDatabaseObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]MySQLClusterHostObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostGroupIds != nil {
+		in, out := &in.HostGroupIds, &out.HostGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]MySQLClusterMaintenanceWindowObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLConfig != nil {
+		in, out := &in.MySQLConfig, &out.MySQLConfig
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]MySQLClusterPerformanceDiagnosticsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MySQLClusterResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Restore != nil {
+		in, out := &in.Restore, &out.Restore
+		*out = make([]MySQLClusterRestoreObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]MySQLClusterUserObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterObservation.
+func (in *MySQLClusterObservation) DeepCopy() *MySQLClusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterParameters) DeepCopyInto(out *MySQLClusterParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]MySQLClusterAccessParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AllowRegenerationHost != nil {
+		in, out := &in.AllowRegenerationHost, &out.AllowRegenerationHost
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]MySQLClusterBackupWindowStartParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Database != nil {
+		in, out := &in.Database, &out.Database
+		*out = make([]MySQLClusterDatabaseParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]MySQLClusterHostParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostGroupIds != nil {
+		in, out := &in.HostGroupIds, &out.HostGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]MySQLClusterMaintenanceWindowParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MySQLConfig != nil {
+		in, out := &in.MySQLConfig, &out.MySQLConfig
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]MySQLClusterPerformanceDiagnosticsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]MySQLClusterResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Restore != nil {
+		in, out := &in.Restore, &out.Restore
+		*out = make([]MySQLClusterRestoreParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = make([]MySQLClusterUserParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterParameters.
+func (in *MySQLClusterParameters) DeepCopy() *MySQLClusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterPerformanceDiagnosticsInitParameters) DeepCopyInto(out *MySQLClusterPerformanceDiagnosticsInitParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionsSamplingInterval != nil {
+		in, out := &in.SessionsSamplingInterval, &out.SessionsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatementsSamplingInterval != nil {
+		in, out := &in.StatementsSamplingInterval, &out.StatementsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterPerformanceDiagnosticsInitParameters.
+func (in *MySQLClusterPerformanceDiagnosticsInitParameters) DeepCopy() *MySQLClusterPerformanceDiagnosticsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterPerformanceDiagnosticsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterPerformanceDiagnosticsObservation) DeepCopyInto(out *MySQLClusterPerformanceDiagnosticsObservation) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionsSamplingInterval != nil {
+		in, out := &in.SessionsSamplingInterval, &out.SessionsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatementsSamplingInterval != nil {
+		in, out := &in.StatementsSamplingInterval, &out.StatementsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterPerformanceDiagnosticsObservation.
+func (in *MySQLClusterPerformanceDiagnosticsObservation) DeepCopy() *MySQLClusterPerformanceDiagnosticsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterPerformanceDiagnosticsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterPerformanceDiagnosticsParameters) DeepCopyInto(out *MySQLClusterPerformanceDiagnosticsParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SessionsSamplingInterval != nil {
+		in, out := &in.SessionsSamplingInterval, &out.SessionsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StatementsSamplingInterval != nil {
+		in, out := &in.StatementsSamplingInterval, &out.StatementsSamplingInterval
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterPerformanceDiagnosticsParameters.
+func (in *MySQLClusterPerformanceDiagnosticsParameters) DeepCopy() *MySQLClusterPerformanceDiagnosticsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterPerformanceDiagnosticsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterResourcesInitParameters) DeepCopyInto(out *MySQLClusterResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterResourcesInitParameters.
+func (in *MySQLClusterResourcesInitParameters) DeepCopy() *MySQLClusterResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterResourcesObservation) DeepCopyInto(out *MySQLClusterResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterResourcesObservation.
+func (in *MySQLClusterResourcesObservation) DeepCopy() *MySQLClusterResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterResourcesParameters) DeepCopyInto(out *MySQLClusterResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterResourcesParameters.
+func (in *MySQLClusterResourcesParameters) DeepCopy() *MySQLClusterResourcesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterResourcesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterRestoreInitParameters) DeepCopyInto(out *MySQLClusterRestoreInitParameters) {
+	*out = *in
+	if in.BackupID != nil {
+		in, out := &in.BackupID, &out.BackupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Time != nil {
+		in, out := &in.Time, &out.Time
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterRestoreInitParameters.
+func (in *MySQLClusterRestoreInitParameters) DeepCopy() *MySQLClusterRestoreInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterRestoreInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterRestoreObservation) DeepCopyInto(out *MySQLClusterRestoreObservation) {
+	*out = *in
+	if in.BackupID != nil {
+		in, out := &in.BackupID, &out.BackupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Time != nil {
+		in, out := &in.Time, &out.Time
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterRestoreObservation.
+func (in *MySQLClusterRestoreObservation) DeepCopy() *MySQLClusterRestoreObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterRestoreObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterRestoreParameters) DeepCopyInto(out *MySQLClusterRestoreParameters) {
+	*out = *in
+	if in.BackupID != nil {
+		in, out := &in.BackupID, &out.BackupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Time != nil {
+		in, out := &in.Time, &out.Time
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterRestoreParameters.
+func (in *MySQLClusterRestoreParameters) DeepCopy() *MySQLClusterRestoreParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterRestoreParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterSpec) DeepCopyInto(out *MySQLClusterSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterSpec.
+func (in *MySQLClusterSpec) DeepCopy() *MySQLClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterStatus) DeepCopyInto(out *MySQLClusterStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterStatus.
+func (in *MySQLClusterStatus) DeepCopy() *MySQLClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterUserInitParameters) DeepCopyInto(out *MySQLClusterUserInitParameters) {
+	*out = *in
+	if in.AuthenticationPlugin != nil {
+		in, out := &in.AuthenticationPlugin, &out.AuthenticationPlugin
+		*out = new(string)
+		**out = **in
+	}
+	if in.ConnectionLimits != nil {
+		in, out := &in.ConnectionLimits, &out.ConnectionLimits
+		*out = make([]ConnectionLimitsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GlobalPermissions != nil {
+		in, out := &in.GlobalPermissions, &out.GlobalPermissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MySQLClusterUserPermissionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterUserInitParameters.
+func (in *MySQLClusterUserInitParameters) DeepCopy() *MySQLClusterUserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterUserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterUserObservation) DeepCopyInto(out *MySQLClusterUserObservation) {
+	*out = *in
+	if in.AuthenticationPlugin != nil {
+		in, out := &in.AuthenticationPlugin, &out.AuthenticationPlugin
+		*out = new(string)
+		**out = **in
+	}
+	if in.ConnectionLimits != nil {
+		in, out := &in.ConnectionLimits, &out.ConnectionLimits
+		*out = make([]ConnectionLimitsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GlobalPermissions != nil {
+		in, out := &in.GlobalPermissions, &out.GlobalPermissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MySQLClusterUserPermissionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterUserObservation.
+func (in *MySQLClusterUserObservation) DeepCopy() *MySQLClusterUserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterUserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterUserParameters) DeepCopyInto(out *MySQLClusterUserParameters) {
+	*out = *in
+	if in.AuthenticationPlugin != nil {
+		in, out := &in.AuthenticationPlugin, &out.AuthenticationPlugin
+		*out = new(string)
+		**out = **in
+	}
+	if in.ConnectionLimits != nil {
+		in, out := &in.ConnectionLimits, &out.ConnectionLimits
+		*out = make([]ConnectionLimitsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GlobalPermissions != nil {
+		in, out := &in.GlobalPermissions, &out.GlobalPermissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MySQLClusterUserPermissionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterUserParameters.
+func (in *MySQLClusterUserParameters) DeepCopy() *MySQLClusterUserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterUserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterUserPermissionInitParameters) DeepCopyInto(out *MySQLClusterUserPermissionInitParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterUserPermissionInitParameters.
+func (in *MySQLClusterUserPermissionInitParameters) DeepCopy() *MySQLClusterUserPermissionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterUserPermissionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterUserPermissionObservation) DeepCopyInto(out *MySQLClusterUserPermissionObservation) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterUserPermissionObservation.
+func (in *MySQLClusterUserPermissionObservation) DeepCopy() *MySQLClusterUserPermissionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterUserPermissionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLClusterUserPermissionParameters) DeepCopyInto(out *MySQLClusterUserPermissionParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLClusterUserPermissionParameters.
+func (in *MySQLClusterUserPermissionParameters) DeepCopy() *MySQLClusterUserPermissionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLClusterUserPermissionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabase) DeepCopyInto(out *MySQLDatabase) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabase.
+func (in *MySQLDatabase) DeepCopy() *MySQLDatabase {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabase)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MySQLDatabase) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabaseInitParameters) DeepCopyInto(out *MySQLDatabaseInitParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabaseInitParameters.
+func (in *MySQLDatabaseInitParameters) DeepCopy() *MySQLDatabaseInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabaseInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabaseList) DeepCopyInto(out *MySQLDatabaseList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MySQLDatabase, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabaseList.
+func (in *MySQLDatabaseList) DeepCopy() *MySQLDatabaseList {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabaseList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MySQLDatabaseList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabaseObservation) DeepCopyInto(out *MySQLDatabaseObservation) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabaseObservation.
+func (in *MySQLDatabaseObservation) DeepCopy() *MySQLDatabaseObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabaseObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabaseParameters) DeepCopyInto(out *MySQLDatabaseParameters) {
+	*out = *in
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabaseParameters.
+func (in *MySQLDatabaseParameters) DeepCopy() *MySQLDatabaseParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabaseParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabaseSpec) DeepCopyInto(out *MySQLDatabaseSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabaseSpec.
+func (in *MySQLDatabaseSpec) DeepCopy() *MySQLDatabaseSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabaseSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLDatabaseStatus) DeepCopyInto(out *MySQLDatabaseStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLDatabaseStatus.
+func (in *MySQLDatabaseStatus) DeepCopy() *MySQLDatabaseStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLDatabaseStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUser) DeepCopyInto(out *MySQLUser) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUser.
+func (in *MySQLUser) DeepCopy() *MySQLUser {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUser)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MySQLUser) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserConnectionLimitsInitParameters) DeepCopyInto(out *MySQLUserConnectionLimitsInitParameters) {
+	*out = *in
+	if in.MaxConnectionsPerHour != nil {
+		in, out := &in.MaxConnectionsPerHour, &out.MaxConnectionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxQuestionsPerHour != nil {
+		in, out := &in.MaxQuestionsPerHour, &out.MaxQuestionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUpdatesPerHour != nil {
+		in, out := &in.MaxUpdatesPerHour, &out.MaxUpdatesPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUserConnections != nil {
+		in, out := &in.MaxUserConnections, &out.MaxUserConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserConnectionLimitsInitParameters.
+func (in *MySQLUserConnectionLimitsInitParameters) DeepCopy() *MySQLUserConnectionLimitsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserConnectionLimitsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserConnectionLimitsObservation) DeepCopyInto(out *MySQLUserConnectionLimitsObservation) {
+	*out = *in
+	if in.MaxConnectionsPerHour != nil {
+		in, out := &in.MaxConnectionsPerHour, &out.MaxConnectionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxQuestionsPerHour != nil {
+		in, out := &in.MaxQuestionsPerHour, &out.MaxQuestionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUpdatesPerHour != nil {
+		in, out := &in.MaxUpdatesPerHour, &out.MaxUpdatesPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUserConnections != nil {
+		in, out := &in.MaxUserConnections, &out.MaxUserConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserConnectionLimitsObservation.
+func (in *MySQLUserConnectionLimitsObservation) DeepCopy() *MySQLUserConnectionLimitsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserConnectionLimitsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserConnectionLimitsParameters) DeepCopyInto(out *MySQLUserConnectionLimitsParameters) {
+	*out = *in
+	if in.MaxConnectionsPerHour != nil {
+		in, out := &in.MaxConnectionsPerHour, &out.MaxConnectionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxQuestionsPerHour != nil {
+		in, out := &in.MaxQuestionsPerHour, &out.MaxQuestionsPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUpdatesPerHour != nil {
+		in, out := &in.MaxUpdatesPerHour, &out.MaxUpdatesPerHour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxUserConnections != nil {
+		in, out := &in.MaxUserConnections, &out.MaxUserConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserConnectionLimitsParameters.
+func (in *MySQLUserConnectionLimitsParameters) DeepCopy() *MySQLUserConnectionLimitsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserConnectionLimitsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserInitParameters) DeepCopyInto(out *MySQLUserInitParameters) {
+	*out = *in
+	if in.AuthenticationPlugin != nil {
+		in, out := &in.AuthenticationPlugin, &out.AuthenticationPlugin
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConnectionLimits != nil {
+		in, out := &in.ConnectionLimits, &out.ConnectionLimits
+		*out = make([]MySQLUserConnectionLimitsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GlobalPermissions != nil {
+		in, out := &in.GlobalPermissions, &out.GlobalPermissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MySQLUserPermissionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserInitParameters.
+func (in *MySQLUserInitParameters) DeepCopy() *MySQLUserInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserList) DeepCopyInto(out *MySQLUserList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MySQLUser, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserList.
+func (in *MySQLUserList) DeepCopy() *MySQLUserList {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MySQLUserList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserObservation) DeepCopyInto(out *MySQLUserObservation) {
+	*out = *in
+	if in.AuthenticationPlugin != nil {
+		in, out := &in.AuthenticationPlugin, &out.AuthenticationPlugin
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ConnectionLimits != nil {
+		in, out := &in.ConnectionLimits, &out.ConnectionLimits
+		*out = make([]MySQLUserConnectionLimitsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GlobalPermissions != nil {
+		in, out := &in.GlobalPermissions, &out.GlobalPermissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MySQLUserPermissionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserObservation.
+func (in *MySQLUserObservation) DeepCopy() *MySQLUserObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserParameters) DeepCopyInto(out *MySQLUserParameters) {
+	*out = *in
+	if in.AuthenticationPlugin != nil {
+		in, out := &in.AuthenticationPlugin, &out.AuthenticationPlugin
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterID != nil {
+		in, out := &in.ClusterID, &out.ClusterID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClusterIDRef != nil {
+		in, out := &in.ClusterIDRef, &out.ClusterIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterIDSelector != nil {
+		in, out := &in.ClusterIDSelector, &out.ClusterIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConnectionLimits != nil {
+		in, out := &in.ConnectionLimits, &out.ConnectionLimits
+		*out = make([]MySQLUserConnectionLimitsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GlobalPermissions != nil {
+		in, out := &in.GlobalPermissions, &out.GlobalPermissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.Permission != nil {
+		in, out := &in.Permission, &out.Permission
+		*out = make([]MySQLUserPermissionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserParameters.
+func (in *MySQLUserParameters) DeepCopy() *MySQLUserParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserPermissionInitParameters) DeepCopyInto(out *MySQLUserPermissionInitParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseNameRef != nil {
+		in, out := &in.DatabaseNameRef, &out.DatabaseNameRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseNameSelector != nil {
+		in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserPermissionInitParameters.
+func (in *MySQLUserPermissionInitParameters) DeepCopy() *MySQLUserPermissionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserPermissionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserPermissionObservation) DeepCopyInto(out *MySQLUserPermissionObservation) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserPermissionObservation.
+func (in *MySQLUserPermissionObservation) DeepCopy() *MySQLUserPermissionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserPermissionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserPermissionParameters) DeepCopyInto(out *MySQLUserPermissionParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+	if in.DatabaseNameRef != nil {
+		in, out := &in.DatabaseNameRef, &out.DatabaseNameRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DatabaseNameSelector != nil {
+		in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserPermissionParameters.
+func (in *MySQLUserPermissionParameters) DeepCopy() *MySQLUserPermissionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserPermissionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserSpec) DeepCopyInto(out *MySQLUserSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserSpec.
+func (in *MySQLUserSpec) DeepCopy() *MySQLUserSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLUserStatus) DeepCopyInto(out *MySQLUserStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLUserStatus.
+func (in *MySQLUserStatus) DeepCopy() *MySQLUserStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLUserStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetInitParameters) DeepCopyInto(out *NetInitParameters) {
+	*out = *in
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetInitParameters.
+func (in *NetInitParameters) DeepCopy() *NetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetObservation) DeepCopyInto(out *NetObservation) {
+	*out = *in
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetObservation.
+func (in *NetObservation) DeepCopy() *NetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(NetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetParameters) DeepCopyInto(out *NetParameters) {
+	*out = *in
+	if in.MaxIncomingConnections != nil {
+		in, out := &in.MaxIncomingConnections, &out.MaxIncomingConnections
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetParameters.
+func (in *NetParameters) DeepCopy() *NetParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NetParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperationProfilingInitParameters) DeepCopyInto(out *OperationProfilingInitParameters) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowOpThreshold != nil {
+		in, out := &in.SlowOpThreshold, &out.SlowOpThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationProfilingInitParameters.
+func (in *OperationProfilingInitParameters) DeepCopy() *OperationProfilingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OperationProfilingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperationProfilingObservation) DeepCopyInto(out *OperationProfilingObservation) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowOpThreshold != nil {
+		in, out := &in.SlowOpThreshold, &out.SlowOpThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationProfilingObservation.
+func (in *OperationProfilingObservation) DeepCopy() *OperationProfilingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OperationProfilingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperationProfilingParameters) DeepCopyInto(out *OperationProfilingParameters) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowOpThreshold != nil {
+		in, out := &in.SlowOpThreshold, &out.SlowOpThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationProfilingParameters.
+func (in *OperationProfilingParameters) DeepCopy() *OperationProfilingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OperationProfilingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PatternInitParameters) DeepCopyInto(out *PatternInitParameters) {
+	*out = *in
+	if in.Function != nil {
+		in, out := &in.Function, &out.Function
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regexp != nil {
+		in, out := &in.Regexp, &out.Regexp
+		*out = new(string)
+		**out = **in
+	}
+	if in.Retention != nil {
+		in, out := &in.Retention, &out.Retention
+		*out = make([]RetentionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatternInitParameters.
+func (in *PatternInitParameters) DeepCopy() *PatternInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PatternInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PatternObservation) DeepCopyInto(out *PatternObservation) {
+	*out = *in
+	if in.Function != nil {
+		in, out := &in.Function, &out.Function
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regexp != nil {
+		in, out := &in.Regexp, &out.Regexp
+		*out = new(string)
+		**out = **in
+	}
+	if in.Retention != nil {
+		in, out := &in.Retention, &out.Retention
+		*out = make([]RetentionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatternObservation.
+func (in *PatternObservation) DeepCopy() *PatternObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PatternObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PatternParameters) DeepCopyInto(out *PatternParameters) {
+	*out = *in
+	if in.Function != nil {
+		in, out := &in.Function, &out.Function
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regexp != nil {
+		in, out := &in.Regexp, &out.Regexp
+		*out = new(string)
+		**out = **in
+	}
+	if in.Retention != nil {
+		in, out := &in.Retention, &out.Retention
+		*out = make([]RetentionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatternParameters.
+func (in *PatternParameters) DeepCopy() *PatternParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PatternParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceDiagnosticsInitParameters) DeepCopyInto(out *PerformanceDiagnosticsInitParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceDiagnosticsInitParameters.
+func (in *PerformanceDiagnosticsInitParameters) DeepCopy() *PerformanceDiagnosticsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PerformanceDiagnosticsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceDiagnosticsObservation) DeepCopyInto(out *PerformanceDiagnosticsObservation) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceDiagnosticsObservation.
+func (in *PerformanceDiagnosticsObservation) DeepCopy() *PerformanceDiagnosticsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PerformanceDiagnosticsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceDiagnosticsParameters) DeepCopyInto(out *PerformanceDiagnosticsParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceDiagnosticsParameters.
+func (in *PerformanceDiagnosticsParameters) DeepCopy() *PerformanceDiagnosticsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PerformanceDiagnosticsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PermissionInitParameters) DeepCopyInto(out *PermissionInitParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionInitParameters.
+func (in *PermissionInitParameters) DeepCopy() *PermissionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PermissionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PermissionObservation) DeepCopyInto(out *PermissionObservation) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionObservation.
+func (in *PermissionObservation) DeepCopy() *PermissionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PermissionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PermissionParameters) DeepCopyInto(out *PermissionParameters) {
+	*out = *in
+	if in.DatabaseName != nil {
+		in, out := &in.DatabaseName, &out.DatabaseName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionParameters.
+func (in *PermissionParameters) DeepCopy() *PermissionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PermissionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerConfigInitParameters) DeepCopyInto(out *PoolerConfigInitParameters) {
+	*out = *in
+	if in.PoolClientIdleTimeout != nil {
+		in, out := &in.PoolClientIdleTimeout, &out.PoolClientIdleTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolSize != nil {
+		in, out := &in.PoolSize, &out.PoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolingMode != nil {
+		in, out := &in.PoolingMode, &out.PoolingMode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerConfigInitParameters.
+func (in *PoolerConfigInitParameters) DeepCopy() *PoolerConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerConfigObservation) DeepCopyInto(out *PoolerConfigObservation) {
+	*out = *in
+	if in.PoolClientIdleTimeout != nil {
+		in, out := &in.PoolClientIdleTimeout, &out.PoolClientIdleTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolSize != nil {
+		in, out := &in.PoolSize, &out.PoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolingMode != nil {
+		in, out := &in.PoolingMode, &out.PoolingMode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerConfigObservation.
+func (in *PoolerConfigObservation) DeepCopy() *PoolerConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerConfigParameters) DeepCopyInto(out *PoolerConfigParameters) {
+	*out = *in
+	if in.PoolClientIdleTimeout != nil {
+		in, out := &in.PoolClientIdleTimeout, &out.PoolClientIdleTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolSize != nil {
+		in, out := &in.PoolSize, &out.PoolSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolingMode != nil {
+		in, out := &in.PoolingMode, &out.PoolingMode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerConfigParameters.
+func (in *PoolerConfigParameters) DeepCopy() *PoolerConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlCluster) DeepCopyInto(out *PostgresqlCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlCluster.
+func (in *PostgresqlCluster) DeepCopy() *PostgresqlCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PostgresqlCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterConfigAccessInitParameters) DeepCopyInto(out *PostgresqlClusterConfigAccessInitParameters) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Serverless != nil {
+		in, out := &in.Serverless, &out.Serverless
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterConfigAccessInitParameters.
+func (in *PostgresqlClusterConfigAccessInitParameters) DeepCopy() *PostgresqlClusterConfigAccessInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlClusterConfigAccessInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterConfigAccessObservation) DeepCopyInto(out *PostgresqlClusterConfigAccessObservation) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Serverless != nil {
+		in, out := &in.Serverless, &out.Serverless
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterConfigAccessObservation.
+func (in *PostgresqlClusterConfigAccessObservation) DeepCopy() *PostgresqlClusterConfigAccessObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlClusterConfigAccessObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterConfigAccessParameters) DeepCopyInto(out *PostgresqlClusterConfigAccessParameters) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DataTransfer != nil {
+		in, out := &in.DataTransfer, &out.DataTransfer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Serverless != nil {
+		in, out := &in.Serverless, &out.Serverless
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterConfigAccessParameters.
+func (in *PostgresqlClusterConfigAccessParameters) DeepCopy() *PostgresqlClusterConfigAccessParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlClusterConfigAccessParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterConfigInitParameters) DeepCopyInto(out *PostgresqlClusterConfigInitParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]PostgresqlClusterConfigAccessInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Autofailover != nil {
+		in, out := &in.Autofailover, &out.Autofailover
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]ConfigBackupWindowStartInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]ConfigDiskSizeAutoscalingInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]ConfigPerformanceDiagnosticsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PoolerConfig != nil {
+		in, out := &in.PoolerConfig, &out.PoolerConfig
+		*out = make([]ConfigPoolerConfigInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresqlConfig != nil {
+		in, out := &in.PostgresqlConfig, &out.PostgresqlConfig
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ConfigResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterConfigInitParameters.
+func (in *PostgresqlClusterConfigInitParameters) DeepCopy() *PostgresqlClusterConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlClusterConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterConfigObservation) DeepCopyInto(out *PostgresqlClusterConfigObservation) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]PostgresqlClusterConfigAccessObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Autofailover != nil {
+		in, out := &in.Autofailover, &out.Autofailover
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]ConfigBackupWindowStartObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]ConfigDiskSizeAutoscalingObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]ConfigPerformanceDiagnosticsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PoolerConfig != nil {
+		in, out := &in.PoolerConfig, &out.PoolerConfig
+		*out = make([]ConfigPoolerConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresqlConfig != nil {
+		in, out := &in.PostgresqlConfig, &out.PostgresqlConfig
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ConfigResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterConfigObservation.
+func (in *PostgresqlClusterConfigObservation) DeepCopy() *PostgresqlClusterConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlClusterConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterConfigParameters) DeepCopyInto(out *PostgresqlClusterConfigParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]PostgresqlClusterConfigAccessParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Autofailover != nil {
+		in, out := &in.Autofailover, &out.Autofailover
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BackupRetainPeriodDays != nil {
+		in, out := &in.BackupRetainPeriodDays, &out.BackupRetainPeriodDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.BackupWindowStart != nil {
+		in, out := &in.BackupWindowStart, &out.BackupWindowStart
+		*out = make([]ConfigBackupWindowStartParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]ConfigDiskSizeAutoscalingParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PerformanceDiagnostics != nil {
+		in, out := &in.PerformanceDiagnostics, &out.PerformanceDiagnostics
+		*out = make([]ConfigPerformanceDiagnosticsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PoolerConfig != nil {
+		in, out := &in.PoolerConfig, &out.PoolerConfig
+		*out = make([]ConfigPoolerConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PostgresqlConfig != nil {
+		in, out := &in.PostgresqlConfig, &out.PostgresqlConfig
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ConfigResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterConfigParameters.
+func (in *PostgresqlClusterConfigParameters) DeepCopy() *PostgresqlClusterConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresqlClusterConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresqlClusterDatabaseInitParameters) DeepCopyInto(out *PostgresqlClusterDatabaseInitParameters) {
+	*out = *in
+	if in.Extension != nil {
+		in, out := &in.Extension, &out.Extension
+		*out = make([]ExtensionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.LcCollate != nil {
+		in, out := &in.LcCollate, &out.LcCollate
+		*out = new(string)
+		**out = **in
+	}
+	if in.LcType != nil {
+		in, out := &in.LcType, &out.LcType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Owner != nil {
+		in, out := &in.Owner, &out.Owner
+		*out = new(string)
+		**out = **in
+	}
+	if in.TemplateDB != nil {
+		in, out := &in.TemplateDB, &out.TemplateDB
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterDatabaseInitParameters.
+func (in *PostgresqlClusterDatabaseInitParameters) DeepCopy() *PostgresqlClusterDatabaseInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterDatabaseObservation) DeepCopyInto(out *PostgresqlClusterDatabaseObservation) { + *out = *in + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LcCollate != nil { + in, out := &in.LcCollate, &out.LcCollate + *out = new(string) + **out = **in + } + if in.LcType != nil { + in, out := &in.LcType, &out.LcType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.TemplateDB != nil { + in, out := &in.TemplateDB, &out.TemplateDB + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterDatabaseObservation. +func (in *PostgresqlClusterDatabaseObservation) DeepCopy() *PostgresqlClusterDatabaseObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterDatabaseParameters) DeepCopyInto(out *PostgresqlClusterDatabaseParameters) { + *out = *in + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LcCollate != nil { + in, out := &in.LcCollate, &out.LcCollate + *out = new(string) + **out = **in + } + if in.LcType != nil { + in, out := &in.LcType, &out.LcType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.TemplateDB != nil { + in, out := &in.TemplateDB, &out.TemplateDB + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterDatabaseParameters. +func (in *PostgresqlClusterDatabaseParameters) DeepCopy() *PostgresqlClusterDatabaseParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlClusterHostInitParameters) DeepCopyInto(out *PostgresqlClusterHostInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ReplicationSourceName != nil { + in, out := &in.ReplicationSourceName, &out.ReplicationSourceName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterHostInitParameters. +func (in *PostgresqlClusterHostInitParameters) DeepCopy() *PostgresqlClusterHostInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterHostInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterHostObservation) DeepCopyInto(out *PostgresqlClusterHostObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ReplicationSource != nil { + in, out := &in.ReplicationSource, &out.ReplicationSource + *out = new(string) + **out = **in + } + if in.ReplicationSourceName != nil { + in, out := &in.ReplicationSourceName, &out.ReplicationSourceName + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterHostObservation. +func (in *PostgresqlClusterHostObservation) DeepCopy() *PostgresqlClusterHostObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterHostObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlClusterHostParameters) DeepCopyInto(out *PostgresqlClusterHostParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ReplicationSourceName != nil { + in, out := &in.ReplicationSourceName, &out.ReplicationSourceName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterHostParameters. +func (in *PostgresqlClusterHostParameters) DeepCopy() *PostgresqlClusterHostParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterHostParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterInitParameters) DeepCopyInto(out *PostgresqlClusterInitParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]PostgresqlClusterConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]PostgresqlClusterDatabaseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]PostgresqlClusterHostInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HostMasterName != nil { + in, out := &in.HostMasterName, &out.HostMasterName + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]PostgresqlClusterMaintenanceWindowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = make([]PostgresqlClusterRestoreInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]PostgresqlClusterUserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterInitParameters. +func (in *PostgresqlClusterInitParameters) DeepCopy() *PostgresqlClusterInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterList) DeepCopyInto(out *PostgresqlClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresqlCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterList. +func (in *PostgresqlClusterList) DeepCopy() *PostgresqlClusterList { + if in == nil { + return nil + } + out := new(PostgresqlClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresqlClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlClusterMaintenanceWindowInitParameters) DeepCopyInto(out *PostgresqlClusterMaintenanceWindowInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterMaintenanceWindowInitParameters. +func (in *PostgresqlClusterMaintenanceWindowInitParameters) DeepCopy() *PostgresqlClusterMaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterMaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterMaintenanceWindowObservation) DeepCopyInto(out *PostgresqlClusterMaintenanceWindowObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterMaintenanceWindowObservation. +func (in *PostgresqlClusterMaintenanceWindowObservation) DeepCopy() *PostgresqlClusterMaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterMaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterMaintenanceWindowParameters) DeepCopyInto(out *PostgresqlClusterMaintenanceWindowParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterMaintenanceWindowParameters. +func (in *PostgresqlClusterMaintenanceWindowParameters) DeepCopy() *PostgresqlClusterMaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterMaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlClusterObservation) DeepCopyInto(out *PostgresqlClusterObservation) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]PostgresqlClusterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]PostgresqlClusterDatabaseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Health != nil { + in, out := &in.Health, &out.Health + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]PostgresqlClusterHostObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HostMasterName != nil { + in, out := &in.HostMasterName, &out.HostMasterName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]PostgresqlClusterMaintenanceWindowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = make([]PostgresqlClusterRestoreObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]PostgresqlClusterUserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterObservation. 
+func (in *PostgresqlClusterObservation) DeepCopy() *PostgresqlClusterObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterParameters) DeepCopyInto(out *PostgresqlClusterParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]PostgresqlClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]PostgresqlClusterDatabaseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]PostgresqlClusterHostParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HostMasterName != nil { + in, out := &in.HostMasterName, &out.HostMasterName + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = make([]PostgresqlClusterMaintenanceWindowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = make([]PostgresqlClusterRestoreParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]PostgresqlClusterUserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterParameters. +func (in *PostgresqlClusterParameters) DeepCopy() *PostgresqlClusterParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterRestoreInitParameters) DeepCopyInto(out *PostgresqlClusterRestoreInitParameters) { + *out = *in + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.TimeInclusive != nil { + in, out := &in.TimeInclusive, &out.TimeInclusive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterRestoreInitParameters. +func (in *PostgresqlClusterRestoreInitParameters) DeepCopy() *PostgresqlClusterRestoreInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterRestoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterRestoreObservation) DeepCopyInto(out *PostgresqlClusterRestoreObservation) { + *out = *in + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.TimeInclusive != nil { + in, out := &in.TimeInclusive, &out.TimeInclusive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterRestoreObservation. +func (in *PostgresqlClusterRestoreObservation) DeepCopy() *PostgresqlClusterRestoreObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterRestoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterRestoreParameters) DeepCopyInto(out *PostgresqlClusterRestoreParameters) { + *out = *in + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.TimeInclusive != nil { + in, out := &in.TimeInclusive, &out.TimeInclusive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterRestoreParameters. 
+func (in *PostgresqlClusterRestoreParameters) DeepCopy() *PostgresqlClusterRestoreParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterRestoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterSpec) DeepCopyInto(out *PostgresqlClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterSpec. +func (in *PostgresqlClusterSpec) DeepCopy() *PostgresqlClusterSpec { + if in == nil { + return nil + } + out := new(PostgresqlClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterStatus) DeepCopyInto(out *PostgresqlClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterStatus. +func (in *PostgresqlClusterStatus) DeepCopy() *PostgresqlClusterStatus { + if in == nil { + return nil + } + out := new(PostgresqlClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterUserInitParameters) DeepCopyInto(out *PostgresqlClusterUserInitParameters) { + *out = *in + if in.ConnLimit != nil { + in, out := &in.ConnLimit, &out.ConnLimit + *out = new(float64) + **out = **in + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PostgresqlClusterUserPermissionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterUserInitParameters. +func (in *PostgresqlClusterUserInitParameters) DeepCopy() *PostgresqlClusterUserInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlClusterUserObservation) DeepCopyInto(out *PostgresqlClusterUserObservation) { + *out = *in + if in.ConnLimit != nil { + in, out := &in.ConnLimit, &out.ConnLimit + *out = new(float64) + **out = **in + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PostgresqlClusterUserPermissionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterUserObservation. +func (in *PostgresqlClusterUserObservation) DeepCopy() *PostgresqlClusterUserObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterUserParameters) DeepCopyInto(out *PostgresqlClusterUserParameters) { + *out = *in + if in.ConnLimit != nil { + in, out := &in.ConnLimit, &out.ConnLimit + *out = new(float64) + **out = **in + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PostgresqlClusterUserPermissionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterUserParameters. +func (in *PostgresqlClusterUserParameters) DeepCopy() *PostgresqlClusterUserParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlClusterUserPermissionInitParameters) DeepCopyInto(out *PostgresqlClusterUserPermissionInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterUserPermissionInitParameters. +func (in *PostgresqlClusterUserPermissionInitParameters) DeepCopy() *PostgresqlClusterUserPermissionInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterUserPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterUserPermissionObservation) DeepCopyInto(out *PostgresqlClusterUserPermissionObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterUserPermissionObservation. +func (in *PostgresqlClusterUserPermissionObservation) DeepCopy() *PostgresqlClusterUserPermissionObservation { + if in == nil { + return nil + } + out := new(PostgresqlClusterUserPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlClusterUserPermissionParameters) DeepCopyInto(out *PostgresqlClusterUserPermissionParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlClusterUserPermissionParameters. +func (in *PostgresqlClusterUserPermissionParameters) DeepCopy() *PostgresqlClusterUserPermissionParameters { + if in == nil { + return nil + } + out := new(PostgresqlClusterUserPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabase) DeepCopyInto(out *PostgresqlDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabase. +func (in *PostgresqlDatabase) DeepCopy() *PostgresqlDatabase { + if in == nil { + return nil + } + out := new(PostgresqlDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresqlDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlDatabaseExtensionInitParameters) DeepCopyInto(out *PostgresqlDatabaseExtensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseExtensionInitParameters. +func (in *PostgresqlDatabaseExtensionInitParameters) DeepCopy() *PostgresqlDatabaseExtensionInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabaseExtensionObservation) DeepCopyInto(out *PostgresqlDatabaseExtensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseExtensionObservation. +func (in *PostgresqlDatabaseExtensionObservation) DeepCopy() *PostgresqlDatabaseExtensionObservation { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabaseExtensionParameters) DeepCopyInto(out *PostgresqlDatabaseExtensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseExtensionParameters. +func (in *PostgresqlDatabaseExtensionParameters) DeepCopy() *PostgresqlDatabaseExtensionParameters { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlDatabaseInitParameters) DeepCopyInto(out *PostgresqlDatabaseInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ClusterIDRef != nil { + in, out := &in.ClusterIDRef, &out.ClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterIDSelector != nil { + in, out := &in.ClusterIDSelector, &out.ClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]PostgresqlDatabaseExtensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LcCollate != nil { + in, out := &in.LcCollate, &out.LcCollate + *out = new(string) + **out = **in + } + if in.LcType != nil { + in, out := &in.LcType, &out.LcType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.OwnerRef != nil { + in, out := &in.OwnerRef, &out.OwnerRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OwnerSelector != nil { + in, out := &in.OwnerSelector, &out.OwnerSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TemplateDB != nil { + in, out := &in.TemplateDB, &out.TemplateDB + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseInitParameters. +func (in *PostgresqlDatabaseInitParameters) DeepCopy() *PostgresqlDatabaseInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabaseList) DeepCopyInto(out *PostgresqlDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresqlDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseList. +func (in *PostgresqlDatabaseList) DeepCopy() *PostgresqlDatabaseList { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresqlDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlDatabaseObservation) DeepCopyInto(out *PostgresqlDatabaseObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]PostgresqlDatabaseExtensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LcCollate != nil { + in, out := &in.LcCollate, &out.LcCollate + *out = new(string) + **out = **in + } + if in.LcType != nil { + in, out := &in.LcType, &out.LcType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.TemplateDB != nil { + in, out := &in.TemplateDB, &out.TemplateDB + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseObservation. +func (in *PostgresqlDatabaseObservation) DeepCopy() *PostgresqlDatabaseObservation { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabaseParameters) DeepCopyInto(out *PostgresqlDatabaseParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ClusterIDRef != nil { + in, out := &in.ClusterIDRef, &out.ClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterIDSelector != nil { + in, out := &in.ClusterIDSelector, &out.ClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]PostgresqlDatabaseExtensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LcCollate != nil { + in, out := &in.LcCollate, &out.LcCollate + *out = new(string) + **out = **in + } + if in.LcType != nil { + in, out := &in.LcType, &out.LcType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.OwnerRef != nil { + in, out := &in.OwnerRef, &out.OwnerRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OwnerSelector != nil { + in, out := &in.OwnerSelector, &out.OwnerSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TemplateDB != nil { + in, out := &in.TemplateDB, &out.TemplateDB + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseParameters. 
+func (in *PostgresqlDatabaseParameters) DeepCopy() *PostgresqlDatabaseParameters { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabaseSpec) DeepCopyInto(out *PostgresqlDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseSpec. +func (in *PostgresqlDatabaseSpec) DeepCopy() *PostgresqlDatabaseSpec { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlDatabaseStatus) DeepCopyInto(out *PostgresqlDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlDatabaseStatus. +func (in *PostgresqlDatabaseStatus) DeepCopy() *PostgresqlDatabaseStatus { + if in == nil { + return nil + } + out := new(PostgresqlDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUser) DeepCopyInto(out *PostgresqlUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUser. +func (in *PostgresqlUser) DeepCopy() *PostgresqlUser { + if in == nil { + return nil + } + out := new(PostgresqlUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresqlUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlUserInitParameters) DeepCopyInto(out *PostgresqlUserInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ClusterIDRef != nil { + in, out := &in.ClusterIDRef, &out.ClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterIDSelector != nil { + in, out := &in.ClusterIDSelector, &out.ClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnLimit != nil { + in, out := &in.ConnLimit, &out.ConnLimit + *out = new(float64) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PostgresqlUserPermissionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserInitParameters. +func (in *PostgresqlUserInitParameters) DeepCopy() *PostgresqlUserInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUserList) DeepCopyInto(out *PostgresqlUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresqlUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserList. +func (in *PostgresqlUserList) DeepCopy() *PostgresqlUserList { + if in == nil { + return nil + } + out := new(PostgresqlUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresqlUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlUserObservation) DeepCopyInto(out *PostgresqlUserObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ConnLimit != nil { + in, out := &in.ConnLimit, &out.ConnLimit + *out = new(float64) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PostgresqlUserPermissionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserObservation. +func (in *PostgresqlUserObservation) DeepCopy() *PostgresqlUserObservation { + if in == nil { + return nil + } + out := new(PostgresqlUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresqlUserParameters) DeepCopyInto(out *PostgresqlUserParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ClusterIDRef != nil { + in, out := &in.ClusterIDRef, &out.ClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterIDSelector != nil { + in, out := &in.ClusterIDSelector, &out.ClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnLimit != nil { + in, out := &in.ConnLimit, &out.ConnLimit + *out = new(float64) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PostgresqlUserPermissionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserParameters. +func (in *PostgresqlUserParameters) DeepCopy() *PostgresqlUserParameters { + if in == nil { + return nil + } + out := new(PostgresqlUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUserPermissionInitParameters) DeepCopyInto(out *PostgresqlUserPermissionInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserPermissionInitParameters. +func (in *PostgresqlUserPermissionInitParameters) DeepCopy() *PostgresqlUserPermissionInitParameters { + if in == nil { + return nil + } + out := new(PostgresqlUserPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUserPermissionObservation) DeepCopyInto(out *PostgresqlUserPermissionObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserPermissionObservation. 
+func (in *PostgresqlUserPermissionObservation) DeepCopy() *PostgresqlUserPermissionObservation { + if in == nil { + return nil + } + out := new(PostgresqlUserPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUserPermissionParameters) DeepCopyInto(out *PostgresqlUserPermissionParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserPermissionParameters. +func (in *PostgresqlUserPermissionParameters) DeepCopy() *PostgresqlUserPermissionParameters { + if in == nil { + return nil + } + out := new(PostgresqlUserPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUserSpec) DeepCopyInto(out *PostgresqlUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserSpec. +func (in *PostgresqlUserSpec) DeepCopy() *PostgresqlUserSpec { + if in == nil { + return nil + } + out := new(PostgresqlUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlUserStatus) DeepCopyInto(out *PostgresqlUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlUserStatus. +func (in *PostgresqlUserStatus) DeepCopy() *PostgresqlUserStatus { + if in == nil { + return nil + } + out := new(PostgresqlUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PxfConfigInitParameters) DeepCopyInto(out *PxfConfigInitParameters) {
+	*out = *in
+	if in.ConnectionTimeout != nil {
+		in, out := &in.ConnectionTimeout, &out.ConnectionTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxThreads != nil {
+		in, out := &in.MaxThreads, &out.MaxThreads
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolAllowCoreThreadTimeout != nil {
+		in, out := &in.PoolAllowCoreThreadTimeout, &out.PoolAllowCoreThreadTimeout
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PoolCoreSize != nil {
+		in, out := &in.PoolCoreSize, &out.PoolCoreSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolMaxSize != nil {
+		in, out := &in.PoolMaxSize, &out.PoolMaxSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolQueueCapacity != nil {
+		in, out := &in.PoolQueueCapacity, &out.PoolQueueCapacity
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UploadTimeout != nil {
+		in, out := &in.UploadTimeout, &out.UploadTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Xms != nil {
+		in, out := &in.Xms, &out.Xms
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Xmx != nil {
+		in, out := &in.Xmx, &out.Xmx
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PxfConfigInitParameters.
+func (in *PxfConfigInitParameters) DeepCopy() *PxfConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PxfConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PxfConfigObservation) DeepCopyInto(out *PxfConfigObservation) {
+	*out = *in
+	if in.ConnectionTimeout != nil {
+		in, out := &in.ConnectionTimeout, &out.ConnectionTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxThreads != nil {
+		in, out := &in.MaxThreads, &out.MaxThreads
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolAllowCoreThreadTimeout != nil {
+		in, out := &in.PoolAllowCoreThreadTimeout, &out.PoolAllowCoreThreadTimeout
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PoolCoreSize != nil {
+		in, out := &in.PoolCoreSize, &out.PoolCoreSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolMaxSize != nil {
+		in, out := &in.PoolMaxSize, &out.PoolMaxSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolQueueCapacity != nil {
+		in, out := &in.PoolQueueCapacity, &out.PoolQueueCapacity
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UploadTimeout != nil {
+		in, out := &in.UploadTimeout, &out.UploadTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Xms != nil {
+		in, out := &in.Xms, &out.Xms
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Xmx != nil {
+		in, out := &in.Xmx, &out.Xmx
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PxfConfigObservation.
+func (in *PxfConfigObservation) DeepCopy() *PxfConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PxfConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PxfConfigParameters) DeepCopyInto(out *PxfConfigParameters) {
+	*out = *in
+	if in.ConnectionTimeout != nil {
+		in, out := &in.ConnectionTimeout, &out.ConnectionTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxThreads != nil {
+		in, out := &in.MaxThreads, &out.MaxThreads
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolAllowCoreThreadTimeout != nil {
+		in, out := &in.PoolAllowCoreThreadTimeout, &out.PoolAllowCoreThreadTimeout
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PoolCoreSize != nil {
+		in, out := &in.PoolCoreSize, &out.PoolCoreSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolMaxSize != nil {
+		in, out := &in.PoolMaxSize, &out.PoolMaxSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PoolQueueCapacity != nil {
+		in, out := &in.PoolQueueCapacity, &out.PoolQueueCapacity
+		*out = new(float64)
+		**out = **in
+	}
+	if in.UploadTimeout != nil {
+		in, out := &in.UploadTimeout, &out.UploadTimeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Xms != nil {
+		in, out := &in.Xms, &out.Xms
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Xmx != nil {
+		in, out := &in.Xmx, &out.Xmx
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PxfConfigParameters.
+func (in *PxfConfigParameters) DeepCopy() *PxfConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PxfConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryCacheInitParameters) DeepCopyInto(out *QueryCacheInitParameters) {
+	*out = *in
+	if in.MaxEntries != nil {
+		in, out := &in.MaxEntries, &out.MaxEntries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxEntrySizeInBytes != nil {
+		in, out := &in.MaxEntrySizeInBytes, &out.MaxEntrySizeInBytes
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxEntrySizeInRows != nil {
+		in, out := &in.MaxEntrySizeInRows, &out.MaxEntrySizeInRows
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxSizeInBytes != nil {
+		in, out := &in.MaxSizeInBytes, &out.MaxSizeInBytes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryCacheInitParameters.
+func (in *QueryCacheInitParameters) DeepCopy() *QueryCacheInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryCacheInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryCacheObservation) DeepCopyInto(out *QueryCacheObservation) {
+	*out = *in
+	if in.MaxEntries != nil {
+		in, out := &in.MaxEntries, &out.MaxEntries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxEntrySizeInBytes != nil {
+		in, out := &in.MaxEntrySizeInBytes, &out.MaxEntrySizeInBytes
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxEntrySizeInRows != nil {
+		in, out := &in.MaxEntrySizeInRows, &out.MaxEntrySizeInRows
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxSizeInBytes != nil {
+		in, out := &in.MaxSizeInBytes, &out.MaxSizeInBytes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryCacheObservation.
+func (in *QueryCacheObservation) DeepCopy() *QueryCacheObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryCacheObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryCacheParameters) DeepCopyInto(out *QueryCacheParameters) {
+	*out = *in
+	if in.MaxEntries != nil {
+		in, out := &in.MaxEntries, &out.MaxEntries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxEntrySizeInBytes != nil {
+		in, out := &in.MaxEntrySizeInBytes, &out.MaxEntrySizeInBytes
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxEntrySizeInRows != nil {
+		in, out := &in.MaxEntrySizeInRows, &out.MaxEntrySizeInRows
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxSizeInBytes != nil {
+		in, out := &in.MaxSizeInBytes, &out.MaxSizeInBytes
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryCacheParameters.
+func (in *QueryCacheParameters) DeepCopy() *QueryCacheParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryCacheParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerIdleInTransactionInitParameters) DeepCopyInto(out *QueryKillerIdleInTransactionInitParameters) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerIdleInTransactionInitParameters.
+func (in *QueryKillerIdleInTransactionInitParameters) DeepCopy() *QueryKillerIdleInTransactionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerIdleInTransactionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerIdleInTransactionObservation) DeepCopyInto(out *QueryKillerIdleInTransactionObservation) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerIdleInTransactionObservation.
+func (in *QueryKillerIdleInTransactionObservation) DeepCopy() *QueryKillerIdleInTransactionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerIdleInTransactionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerIdleInTransactionParameters) DeepCopyInto(out *QueryKillerIdleInTransactionParameters) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerIdleInTransactionParameters.
+func (in *QueryKillerIdleInTransactionParameters) DeepCopy() *QueryKillerIdleInTransactionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerIdleInTransactionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerIdleInitParameters) DeepCopyInto(out *QueryKillerIdleInitParameters) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerIdleInitParameters.
+func (in *QueryKillerIdleInitParameters) DeepCopy() *QueryKillerIdleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerIdleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerIdleObservation) DeepCopyInto(out *QueryKillerIdleObservation) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerIdleObservation.
+func (in *QueryKillerIdleObservation) DeepCopy() *QueryKillerIdleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerIdleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerIdleParameters) DeepCopyInto(out *QueryKillerIdleParameters) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerIdleParameters.
+func (in *QueryKillerIdleParameters) DeepCopy() *QueryKillerIdleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerIdleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerLongRunningInitParameters) DeepCopyInto(out *QueryKillerLongRunningInitParameters) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerLongRunningInitParameters.
+func (in *QueryKillerLongRunningInitParameters) DeepCopy() *QueryKillerLongRunningInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerLongRunningInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerLongRunningObservation) DeepCopyInto(out *QueryKillerLongRunningObservation) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerLongRunningObservation.
+func (in *QueryKillerLongRunningObservation) DeepCopy() *QueryKillerLongRunningObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerLongRunningObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryKillerLongRunningParameters) DeepCopyInto(out *QueryKillerLongRunningParameters) {
+	*out = *in
+	if in.Enable != nil {
+		in, out := &in.Enable, &out.Enable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnoreUsers != nil {
+		in, out := &in.IgnoreUsers, &out.IgnoreUsers
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.MaxAge != nil {
+		in, out := &in.MaxAge, &out.MaxAge
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKillerLongRunningParameters.
+func (in *QueryKillerLongRunningParameters) DeepCopy() *QueryKillerLongRunningParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryKillerLongRunningParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryMaskingRulesInitParameters) DeepCopyInto(out *QueryMaskingRulesInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regexp != nil {
+		in, out := &in.Regexp, &out.Regexp
+		*out = new(string)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryMaskingRulesInitParameters.
+func (in *QueryMaskingRulesInitParameters) DeepCopy() *QueryMaskingRulesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryMaskingRulesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryMaskingRulesObservation) DeepCopyInto(out *QueryMaskingRulesObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regexp != nil {
+		in, out := &in.Regexp, &out.Regexp
+		*out = new(string)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryMaskingRulesObservation.
+func (in *QueryMaskingRulesObservation) DeepCopy() *QueryMaskingRulesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryMaskingRulesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryMaskingRulesParameters) DeepCopyInto(out *QueryMaskingRulesParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Regexp != nil {
+		in, out := &in.Regexp, &out.Regexp
+		*out = new(string)
+		**out = **in
+	}
+	if in.Replace != nil {
+		in, out := &in.Replace, &out.Replace
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryMaskingRulesParameters.
+func (in *QueryMaskingRulesParameters) DeepCopy() *QueryMaskingRulesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueryMaskingRulesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaInitParameters) DeepCopyInto(out *QuotaInitParameters) {
+	*out = *in
+	if in.Errors != nil {
+		in, out := &in.Errors, &out.Errors
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExecutionTime != nil {
+		in, out := &in.ExecutionTime, &out.ExecutionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.IntervalDuration != nil {
+		in, out := &in.IntervalDuration, &out.IntervalDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReadRows != nil {
+		in, out := &in.ReadRows, &out.ReadRows
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ResultRows != nil {
+		in, out := &in.ResultRows, &out.ResultRows
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaInitParameters.
+func (in *QuotaInitParameters) DeepCopy() *QuotaInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QuotaInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaObservation) DeepCopyInto(out *QuotaObservation) {
+	*out = *in
+	if in.Errors != nil {
+		in, out := &in.Errors, &out.Errors
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExecutionTime != nil {
+		in, out := &in.ExecutionTime, &out.ExecutionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.IntervalDuration != nil {
+		in, out := &in.IntervalDuration, &out.IntervalDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReadRows != nil {
+		in, out := &in.ReadRows, &out.ReadRows
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ResultRows != nil {
+		in, out := &in.ResultRows, &out.ResultRows
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaObservation.
+func (in *QuotaObservation) DeepCopy() *QuotaObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QuotaObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaParameters) DeepCopyInto(out *QuotaParameters) {
+	*out = *in
+	if in.Errors != nil {
+		in, out := &in.Errors, &out.Errors
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExecutionTime != nil {
+		in, out := &in.ExecutionTime, &out.ExecutionTime
+		*out = new(float64)
+		**out = **in
+	}
+	if in.IntervalDuration != nil {
+		in, out := &in.IntervalDuration, &out.IntervalDuration
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ReadRows != nil {
+		in, out := &in.ReadRows, &out.ReadRows
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ResultRows != nil {
+		in, out := &in.ResultRows, &out.ResultRows
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaParameters.
+func (in *QuotaParameters) DeepCopy() *QuotaParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QuotaParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RabbitmqInitParameters) DeepCopyInto(out *RabbitmqInitParameters) {
+	*out = *in
+	if in.PasswordSecretRef != nil {
+		in, out := &in.PasswordSecretRef, &out.PasswordSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+	if in.Username != nil {
+		in, out := &in.Username, &out.Username
+		*out = new(string)
+		**out = **in
+	}
+	if in.Vhost != nil {
+		in, out := &in.Vhost, &out.Vhost
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RabbitmqInitParameters.
+func (in *RabbitmqInitParameters) DeepCopy() *RabbitmqInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RabbitmqInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RabbitmqObservation) DeepCopyInto(out *RabbitmqObservation) {
+	*out = *in
+	if in.Username != nil {
+		in, out := &in.Username, &out.Username
+		*out = new(string)
+		**out = **in
+	}
+	if in.Vhost != nil {
+		in, out := &in.Vhost, &out.Vhost
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RabbitmqObservation.
+func (in *RabbitmqObservation) DeepCopy() *RabbitmqObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RabbitmqObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RabbitmqParameters) DeepCopyInto(out *RabbitmqParameters) {
+	*out = *in
+	if in.PasswordSecretRef != nil {
+		in, out := &in.PasswordSecretRef, &out.PasswordSecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+	if in.Username != nil {
+		in, out := &in.Username, &out.Username
+		*out = new(string)
+		**out = **in
+	}
+	if in.Vhost != nil {
+		in, out := &in.Vhost, &out.Vhost
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RabbitmqParameters.
+func (in *RabbitmqParameters) DeepCopy() *RabbitmqParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RabbitmqParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisCluster) DeepCopyInto(out *RedisCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCluster.
+func (in *RedisCluster) DeepCopy() *RedisCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RedisCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterAccessInitParameters) DeepCopyInto(out *RedisClusterAccessInitParameters) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterAccessInitParameters.
+func (in *RedisClusterAccessInitParameters) DeepCopy() *RedisClusterAccessInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterAccessInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterAccessObservation) DeepCopyInto(out *RedisClusterAccessObservation) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterAccessObservation.
+func (in *RedisClusterAccessObservation) DeepCopy() *RedisClusterAccessObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterAccessObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterAccessParameters) DeepCopyInto(out *RedisClusterAccessParameters) {
+	*out = *in
+	if in.DataLens != nil {
+		in, out := &in.DataLens, &out.DataLens
+		*out = new(bool)
+		**out = **in
+	}
+	if in.WebSQL != nil {
+		in, out := &in.WebSQL, &out.WebSQL
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterAccessParameters.
+func (in *RedisClusterAccessParameters) DeepCopy() *RedisClusterAccessParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterAccessParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterConfigInitParameters) DeepCopyInto(out *RedisClusterConfigInitParameters) {
+	*out = *in
+	if in.ClientOutputBufferLimitNormal != nil {
+		in, out := &in.ClientOutputBufferLimitNormal, &out.ClientOutputBufferLimitNormal
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClientOutputBufferLimitPubsub != nil {
+		in, out := &in.ClientOutputBufferLimitPubsub, &out.ClientOutputBufferLimitPubsub
+		*out = new(string)
+		**out = **in
+	}
+	if in.Databases != nil {
+		in, out := &in.Databases, &out.Databases
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxmemoryPercent != nil {
+		in, out := &in.MaxmemoryPercent, &out.MaxmemoryPercent
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxmemoryPolicy != nil {
+		in, out := &in.MaxmemoryPolicy, &out.MaxmemoryPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.NotifyKeyspaceEvents != nil {
+		in, out := &in.NotifyKeyspaceEvents, &out.NotifyKeyspaceEvents
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.SlowlogLogSlowerThan != nil {
+		in, out := &in.SlowlogLogSlowerThan, &out.SlowlogLogSlowerThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SlowlogMaxLen != nil {
+		in, out := &in.SlowlogMaxLen, &out.SlowlogMaxLen
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterConfigInitParameters.
+func (in *RedisClusterConfigInitParameters) DeepCopy() *RedisClusterConfigInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterConfigInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterConfigObservation) DeepCopyInto(out *RedisClusterConfigObservation) {
+	*out = *in
+	if in.ClientOutputBufferLimitNormal != nil {
+		in, out := &in.ClientOutputBufferLimitNormal, &out.ClientOutputBufferLimitNormal
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClientOutputBufferLimitPubsub != nil {
+		in, out := &in.ClientOutputBufferLimitPubsub, &out.ClientOutputBufferLimitPubsub
+		*out = new(string)
+		**out = **in
+	}
+	if in.Databases != nil {
+		in, out := &in.Databases, &out.Databases
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxmemoryPercent != nil {
+		in, out := &in.MaxmemoryPercent, &out.MaxmemoryPercent
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxmemoryPolicy != nil {
+		in, out := &in.MaxmemoryPolicy, &out.MaxmemoryPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.NotifyKeyspaceEvents != nil {
+		in, out := &in.NotifyKeyspaceEvents, &out.NotifyKeyspaceEvents
+		*out = new(string)
+		**out = **in
+	}
+	if in.SlowlogLogSlowerThan != nil {
+		in, out := &in.SlowlogLogSlowerThan, &out.SlowlogLogSlowerThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SlowlogMaxLen != nil {
+		in, out := &in.SlowlogMaxLen, &out.SlowlogMaxLen
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterConfigObservation.
+func (in *RedisClusterConfigObservation) DeepCopy() *RedisClusterConfigObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterConfigObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterConfigParameters) DeepCopyInto(out *RedisClusterConfigParameters) {
+	*out = *in
+	if in.ClientOutputBufferLimitNormal != nil {
+		in, out := &in.ClientOutputBufferLimitNormal, &out.ClientOutputBufferLimitNormal
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClientOutputBufferLimitPubsub != nil {
+		in, out := &in.ClientOutputBufferLimitPubsub, &out.ClientOutputBufferLimitPubsub
+		*out = new(string)
+		**out = **in
+	}
+	if in.Databases != nil {
+		in, out := &in.Databases, &out.Databases
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxmemoryPercent != nil {
+		in, out := &in.MaxmemoryPercent, &out.MaxmemoryPercent
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxmemoryPolicy != nil {
+		in, out := &in.MaxmemoryPolicy, &out.MaxmemoryPolicy
+		*out = new(string)
+		**out = **in
+	}
+	if in.NotifyKeyspaceEvents != nil {
+		in, out := &in.NotifyKeyspaceEvents, &out.NotifyKeyspaceEvents
+		*out = new(string)
+		**out = **in
+	}
+	out.PasswordSecretRef = in.PasswordSecretRef
+	if in.SlowlogLogSlowerThan != nil {
+		in, out := &in.SlowlogLogSlowerThan, &out.SlowlogLogSlowerThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.SlowlogMaxLen != nil {
+		in, out := &in.SlowlogMaxLen, &out.SlowlogMaxLen
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterConfigParameters.
+func (in *RedisClusterConfigParameters) DeepCopy() *RedisClusterConfigParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterConfigParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterDiskSizeAutoscalingInitParameters) DeepCopyInto(out *RedisClusterDiskSizeAutoscalingInitParameters) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterDiskSizeAutoscalingInitParameters.
+func (in *RedisClusterDiskSizeAutoscalingInitParameters) DeepCopy() *RedisClusterDiskSizeAutoscalingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterDiskSizeAutoscalingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterDiskSizeAutoscalingObservation) DeepCopyInto(out *RedisClusterDiskSizeAutoscalingObservation) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterDiskSizeAutoscalingObservation.
+func (in *RedisClusterDiskSizeAutoscalingObservation) DeepCopy() *RedisClusterDiskSizeAutoscalingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterDiskSizeAutoscalingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterDiskSizeAutoscalingParameters) DeepCopyInto(out *RedisClusterDiskSizeAutoscalingParameters) {
+	*out = *in
+	if in.DiskSizeLimit != nil {
+		in, out := &in.DiskSizeLimit, &out.DiskSizeLimit
+		*out = new(float64)
+		**out = **in
+	}
+	if in.EmergencyUsageThreshold != nil {
+		in, out := &in.EmergencyUsageThreshold, &out.EmergencyUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PlannedUsageThreshold != nil {
+		in, out := &in.PlannedUsageThreshold, &out.PlannedUsageThreshold
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterDiskSizeAutoscalingParameters.
+func (in *RedisClusterDiskSizeAutoscalingParameters) DeepCopy() *RedisClusterDiskSizeAutoscalingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterDiskSizeAutoscalingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterHostInitParameters) DeepCopyInto(out *RedisClusterHostInitParameters) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ReplicaPriority != nil {
+		in, out := &in.ReplicaPriority, &out.ReplicaPriority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ShardName != nil {
+		in, out := &in.ShardName, &out.ShardName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterHostInitParameters.
+func (in *RedisClusterHostInitParameters) DeepCopy() *RedisClusterHostInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterHostInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterHostObservation) DeepCopyInto(out *RedisClusterHostObservation) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Fqdn != nil {
+		in, out := &in.Fqdn, &out.Fqdn
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReplicaPriority != nil {
+		in, out := &in.ReplicaPriority, &out.ReplicaPriority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ShardName != nil {
+		in, out := &in.ShardName, &out.ShardName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterHostObservation.
+func (in *RedisClusterHostObservation) DeepCopy() *RedisClusterHostObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterHostObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterHostParameters) DeepCopyInto(out *RedisClusterHostParameters) {
+	*out = *in
+	if in.AssignPublicIP != nil {
+		in, out := &in.AssignPublicIP, &out.AssignPublicIP
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ReplicaPriority != nil {
+		in, out := &in.ReplicaPriority, &out.ReplicaPriority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ShardName != nil {
+		in, out := &in.ShardName, &out.ShardName
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetID != nil {
+		in, out := &in.SubnetID, &out.SubnetID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubnetIDRef != nil {
+		in, out := &in.SubnetIDRef, &out.SubnetIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SubnetIDSelector != nil {
+		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterHostParameters.
+func (in *RedisClusterHostParameters) DeepCopy() *RedisClusterHostParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterHostParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterInitParameters) DeepCopyInto(out *RedisClusterInitParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]RedisClusterAccessInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AnnounceHostnames != nil {
+		in, out := &in.AnnounceHostnames, &out.AnnounceHostnames
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]RedisClusterConfigInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]RedisClusterDiskSizeAutoscalingInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]RedisClusterHostInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]RedisClusterMaintenanceWindowInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PersistenceMode != nil {
+		in, out := &in.PersistenceMode, &out.PersistenceMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]RedisClusterResourcesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Sharded != nil {
+		in, out := &in.Sharded, &out.Sharded
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TLSEnabled != nil {
+		in, out := &in.TLSEnabled, &out.TLSEnabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterInitParameters.
+func (in *RedisClusterInitParameters) DeepCopy() *RedisClusterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterList) DeepCopyInto(out *RedisClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RedisCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterList.
+func (in *RedisClusterList) DeepCopy() *RedisClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RedisClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterMaintenanceWindowInitParameters) DeepCopyInto(out *RedisClusterMaintenanceWindowInitParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterMaintenanceWindowInitParameters.
+func (in *RedisClusterMaintenanceWindowInitParameters) DeepCopy() *RedisClusterMaintenanceWindowInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterMaintenanceWindowInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterMaintenanceWindowObservation) DeepCopyInto(out *RedisClusterMaintenanceWindowObservation) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterMaintenanceWindowObservation.
+func (in *RedisClusterMaintenanceWindowObservation) DeepCopy() *RedisClusterMaintenanceWindowObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterMaintenanceWindowObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterMaintenanceWindowParameters) DeepCopyInto(out *RedisClusterMaintenanceWindowParameters) {
+	*out = *in
+	if in.Day != nil {
+		in, out := &in.Day, &out.Day
+		*out = new(string)
+		**out = **in
+	}
+	if in.Hour != nil {
+		in, out := &in.Hour, &out.Hour
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterMaintenanceWindowParameters.
+func (in *RedisClusterMaintenanceWindowParameters) DeepCopy() *RedisClusterMaintenanceWindowParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterMaintenanceWindowParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterObservation) DeepCopyInto(out *RedisClusterObservation) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]RedisClusterAccessObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AnnounceHostnames != nil {
+		in, out := &in.AnnounceHostnames, &out.AnnounceHostnames
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]RedisClusterConfigObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]RedisClusterDiskSizeAutoscalingObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Health != nil {
+		in, out := &in.Health, &out.Health
+		*out = new(string)
+		**out = **in
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]RedisClusterHostObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]RedisClusterMaintenanceWindowObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.PersistenceMode != nil {
+		in, out := &in.PersistenceMode, &out.PersistenceMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]RedisClusterResourcesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Sharded != nil {
+		in, out := &in.Sharded, &out.Sharded
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+	if in.TLSEnabled != nil {
+		in, out := &in.TLSEnabled, &out.TLSEnabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterObservation.
+func (in *RedisClusterObservation) DeepCopy() *RedisClusterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterParameters) DeepCopyInto(out *RedisClusterParameters) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = make([]RedisClusterAccessParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AnnounceHostnames != nil {
+		in, out := &in.AnnounceHostnames, &out.AnnounceHostnames
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make([]RedisClusterConfigParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DiskSizeAutoscaling != nil {
+		in, out := &in.DiskSizeAutoscaling, &out.DiskSizeAutoscaling
+		*out = make([]RedisClusterDiskSizeAutoscalingParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Environment != nil {
+		in, out := &in.Environment, &out.Environment
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]RedisClusterHostParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.MaintenanceWindow != nil {
+		in, out := &in.MaintenanceWindow, &out.MaintenanceWindow
+		*out = make([]RedisClusterMaintenanceWindowParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PersistenceMode != nil {
+		in, out := &in.PersistenceMode, &out.PersistenceMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]RedisClusterResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIds != nil {
+		in, out := &in.SecurityGroupIds, &out.SecurityGroupIds
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.SecurityGroupIdsRefs != nil {
+		in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs
+		*out = make([]v1.Reference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityGroupIdsSelector != nil {
+		in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Sharded != nil {
+		in, out := &in.Sharded, &out.Sharded
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TLSEnabled != nil {
+		in, out := &in.TLSEnabled, &out.TLSEnabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterParameters.
+func (in *RedisClusterParameters) DeepCopy() *RedisClusterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterResourcesInitParameters) DeepCopyInto(out *RedisClusterResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterResourcesInitParameters.
+func (in *RedisClusterResourcesInitParameters) DeepCopy() *RedisClusterResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisClusterResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisClusterResourcesObservation) DeepCopyInto(out *RedisClusterResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterResourcesObservation. +func (in *RedisClusterResourcesObservation) DeepCopy() *RedisClusterResourcesObservation { + if in == nil { + return nil + } + out := new(RedisClusterResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterResourcesParameters) DeepCopyInto(out *RedisClusterResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterResourcesParameters. +func (in *RedisClusterResourcesParameters) DeepCopy() *RedisClusterResourcesParameters { + if in == nil { + return nil + } + out := new(RedisClusterResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterSpec) DeepCopyInto(out *RedisClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterSpec. +func (in *RedisClusterSpec) DeepCopy() *RedisClusterSpec { + if in == nil { + return nil + } + out := new(RedisClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterStatus) DeepCopyInto(out *RedisClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterStatus. +func (in *RedisClusterStatus) DeepCopy() *RedisClusterStatus { + if in == nil { + return nil + } + out := new(RedisClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongocfgInitParameters) DeepCopyInto(out *ResourcesMongocfgInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongocfgInitParameters. +func (in *ResourcesMongocfgInitParameters) DeepCopy() *ResourcesMongocfgInitParameters { + if in == nil { + return nil + } + out := new(ResourcesMongocfgInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongocfgObservation) DeepCopyInto(out *ResourcesMongocfgObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongocfgObservation. +func (in *ResourcesMongocfgObservation) DeepCopy() *ResourcesMongocfgObservation { + if in == nil { + return nil + } + out := new(ResourcesMongocfgObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongocfgParameters) DeepCopyInto(out *ResourcesMongocfgParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongocfgParameters. +func (in *ResourcesMongocfgParameters) DeepCopy() *ResourcesMongocfgParameters { + if in == nil { + return nil + } + out := new(ResourcesMongocfgParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ResourcesMongodInitParameters) DeepCopyInto(out *ResourcesMongodInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongodInitParameters. +func (in *ResourcesMongodInitParameters) DeepCopy() *ResourcesMongodInitParameters { + if in == nil { + return nil + } + out := new(ResourcesMongodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongodObservation) DeepCopyInto(out *ResourcesMongodObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongodObservation. +func (in *ResourcesMongodObservation) DeepCopy() *ResourcesMongodObservation { + if in == nil { + return nil + } + out := new(ResourcesMongodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongodParameters) DeepCopyInto(out *ResourcesMongodParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongodParameters. +func (in *ResourcesMongodParameters) DeepCopy() *ResourcesMongodParameters { + if in == nil { + return nil + } + out := new(ResourcesMongodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongoinfraInitParameters) DeepCopyInto(out *ResourcesMongoinfraInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongoinfraInitParameters. 
+func (in *ResourcesMongoinfraInitParameters) DeepCopy() *ResourcesMongoinfraInitParameters { + if in == nil { + return nil + } + out := new(ResourcesMongoinfraInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongoinfraObservation) DeepCopyInto(out *ResourcesMongoinfraObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongoinfraObservation. +func (in *ResourcesMongoinfraObservation) DeepCopy() *ResourcesMongoinfraObservation { + if in == nil { + return nil + } + out := new(ResourcesMongoinfraObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongoinfraParameters) DeepCopyInto(out *ResourcesMongoinfraParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongoinfraParameters. +func (in *ResourcesMongoinfraParameters) DeepCopy() *ResourcesMongoinfraParameters { + if in == nil { + return nil + } + out := new(ResourcesMongoinfraParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongosInitParameters) DeepCopyInto(out *ResourcesMongosInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongosInitParameters. +func (in *ResourcesMongosInitParameters) DeepCopy() *ResourcesMongosInitParameters { + if in == nil { + return nil + } + out := new(ResourcesMongosInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesMongosObservation) DeepCopyInto(out *ResourcesMongosObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongosObservation. +func (in *ResourcesMongosObservation) DeepCopy() *ResourcesMongosObservation { + if in == nil { + return nil + } + out := new(ResourcesMongosObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesMongosParameters) DeepCopyInto(out *ResourcesMongosParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesMongosParameters. +func (in *ResourcesMongosParameters) DeepCopy() *ResourcesMongosParameters { + if in == nil { + return nil + } + out := new(ResourcesMongosParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreInitParameters) DeepCopyInto(out *RestoreInitParameters) { + *out = *in + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreInitParameters. +func (in *RestoreInitParameters) DeepCopy() *RestoreInitParameters { + if in == nil { + return nil + } + out := new(RestoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreObservation) DeepCopyInto(out *RestoreObservation) { + *out = *in + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreObservation. +func (in *RestoreObservation) DeepCopy() *RestoreObservation { + if in == nil { + return nil + } + out := new(RestoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreParameters) DeepCopyInto(out *RestoreParameters) { + *out = *in + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreParameters. +func (in *RestoreParameters) DeepCopy() *RestoreParameters { + if in == nil { + return nil + } + out := new(RestoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionInitParameters) DeepCopyInto(out *RetentionInitParameters) { + *out = *in + if in.Age != nil { + in, out := &in.Age, &out.Age + *out = new(float64) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionInitParameters. +func (in *RetentionInitParameters) DeepCopy() *RetentionInitParameters { + if in == nil { + return nil + } + out := new(RetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionObservation) DeepCopyInto(out *RetentionObservation) { + *out = *in + if in.Age != nil { + in, out := &in.Age, &out.Age + *out = new(float64) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionObservation. +func (in *RetentionObservation) DeepCopy() *RetentionObservation { + if in == nil { + return nil + } + out := new(RetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionParameters) DeepCopyInto(out *RetentionParameters) { + *out = *in + if in.Age != nil { + in, out := &in.Age, &out.Age + *out = new(float64) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionParameters. +func (in *RetentionParameters) DeepCopy() *RetentionParameters { + if in == nil { + return nil + } + out := new(RetentionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConnectionInitParameters) DeepCopyInto(out *S3ConnectionInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ExternalS3 != nil { + in, out := &in.ExternalS3, &out.ExternalS3 + *out = make([]ExternalS3InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConnectionInitParameters. +func (in *S3ConnectionInitParameters) DeepCopy() *S3ConnectionInitParameters { + if in == nil { + return nil + } + out := new(S3ConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConnectionObservation) DeepCopyInto(out *S3ConnectionObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ExternalS3 != nil { + in, out := &in.ExternalS3, &out.ExternalS3 + *out = make([]ExternalS3Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConnectionObservation. +func (in *S3ConnectionObservation) DeepCopy() *S3ConnectionObservation { + if in == nil { + return nil + } + out := new(S3ConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConnectionParameters) DeepCopyInto(out *S3ConnectionParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ExternalS3 != nil { + in, out := &in.ExternalS3, &out.ExternalS3 + *out = make([]ExternalS3Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConnectionParameters. +func (in *S3ConnectionParameters) DeepCopy() *S3ConnectionParameters { + if in == nil { + return nil + } + out := new(S3ConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityInitParameters) DeepCopyInto(out *SecurityInitParameters) { + *out = *in + if in.EnableEncryption != nil { + in, out := &in.EnableEncryption, &out.EnableEncryption + *out = new(bool) + **out = **in + } + if in.Kmip != nil { + in, out := &in.Kmip, &out.Kmip + *out = make([]KmipInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityInitParameters. +func (in *SecurityInitParameters) DeepCopy() *SecurityInitParameters { + if in == nil { + return nil + } + out := new(SecurityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityObservation) DeepCopyInto(out *SecurityObservation) { + *out = *in + if in.EnableEncryption != nil { + in, out := &in.EnableEncryption, &out.EnableEncryption + *out = new(bool) + **out = **in + } + if in.Kmip != nil { + in, out := &in.Kmip, &out.Kmip + *out = make([]KmipObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityObservation. +func (in *SecurityObservation) DeepCopy() *SecurityObservation { + if in == nil { + return nil + } + out := new(SecurityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityParameters) DeepCopyInto(out *SecurityParameters) { + *out = *in + if in.EnableEncryption != nil { + in, out := &in.EnableEncryption, &out.EnableEncryption + *out = new(bool) + **out = **in + } + if in.Kmip != nil { + in, out := &in.Kmip, &out.Kmip + *out = make([]KmipParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityParameters. +func (in *SecurityParameters) DeepCopy() *SecurityParameters { + if in == nil { + return nil + } + out := new(SecurityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentHostsInitParameters) DeepCopyInto(out *SegmentHostsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentHostsInitParameters. +func (in *SegmentHostsInitParameters) DeepCopy() *SegmentHostsInitParameters { + if in == nil { + return nil + } + out := new(SegmentHostsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentHostsObservation) DeepCopyInto(out *SegmentHostsObservation) { + *out = *in + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentHostsObservation. +func (in *SegmentHostsObservation) DeepCopy() *SegmentHostsObservation { + if in == nil { + return nil + } + out := new(SegmentHostsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SegmentHostsParameters) DeepCopyInto(out *SegmentHostsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentHostsParameters. +func (in *SegmentHostsParameters) DeepCopy() *SegmentHostsParameters { + if in == nil { + return nil + } + out := new(SegmentHostsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentSubclusterInitParameters) DeepCopyInto(out *SegmentSubclusterInitParameters) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SegmentSubclusterResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentSubclusterInitParameters. +func (in *SegmentSubclusterInitParameters) DeepCopy() *SegmentSubclusterInitParameters { + if in == nil { + return nil + } + out := new(SegmentSubclusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentSubclusterObservation) DeepCopyInto(out *SegmentSubclusterObservation) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SegmentSubclusterResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentSubclusterObservation. +func (in *SegmentSubclusterObservation) DeepCopy() *SegmentSubclusterObservation { + if in == nil { + return nil + } + out := new(SegmentSubclusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentSubclusterParameters) DeepCopyInto(out *SegmentSubclusterParameters) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SegmentSubclusterResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentSubclusterParameters. +func (in *SegmentSubclusterParameters) DeepCopy() *SegmentSubclusterParameters { + if in == nil { + return nil + } + out := new(SegmentSubclusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentSubclusterResourcesInitParameters) DeepCopyInto(out *SegmentSubclusterResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentSubclusterResourcesInitParameters. 
+func (in *SegmentSubclusterResourcesInitParameters) DeepCopy() *SegmentSubclusterResourcesInitParameters { + if in == nil { + return nil + } + out := new(SegmentSubclusterResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentSubclusterResourcesObservation) DeepCopyInto(out *SegmentSubclusterResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentSubclusterResourcesObservation. +func (in *SegmentSubclusterResourcesObservation) DeepCopy() *SegmentSubclusterResourcesObservation { + if in == nil { + return nil + } + out := new(SegmentSubclusterResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SegmentSubclusterResourcesParameters) DeepCopyInto(out *SegmentSubclusterResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SegmentSubclusterResourcesParameters. +func (in *SegmentSubclusterResourcesParameters) DeepCopy() *SegmentSubclusterResourcesParameters { + if in == nil { + return nil + } + out := new(SegmentSubclusterResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SetParameterInitParameters) DeepCopyInto(out *SetParameterInitParameters) { + *out = *in + if in.AuditAuthorizationSuccess != nil { + in, out := &in.AuditAuthorizationSuccess, &out.AuditAuthorizationSuccess + *out = new(bool) + **out = **in + } + if in.EnableFlowControl != nil { + in, out := &in.EnableFlowControl, &out.EnableFlowControl + *out = new(bool) + **out = **in + } + if in.MinSnapshotHistoryWindowInSeconds != nil { + in, out := &in.MinSnapshotHistoryWindowInSeconds, &out.MinSnapshotHistoryWindowInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SetParameterInitParameters. +func (in *SetParameterInitParameters) DeepCopy() *SetParameterInitParameters { + if in == nil { + return nil + } + out := new(SetParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SetParameterObservation) DeepCopyInto(out *SetParameterObservation) { + *out = *in + if in.AuditAuthorizationSuccess != nil { + in, out := &in.AuditAuthorizationSuccess, &out.AuditAuthorizationSuccess + *out = new(bool) + **out = **in + } + if in.EnableFlowControl != nil { + in, out := &in.EnableFlowControl, &out.EnableFlowControl + *out = new(bool) + **out = **in + } + if in.MinSnapshotHistoryWindowInSeconds != nil { + in, out := &in.MinSnapshotHistoryWindowInSeconds, &out.MinSnapshotHistoryWindowInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SetParameterObservation. +func (in *SetParameterObservation) DeepCopy() *SetParameterObservation { + if in == nil { + return nil + } + out := new(SetParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SetParameterParameters) DeepCopyInto(out *SetParameterParameters) { + *out = *in + if in.AuditAuthorizationSuccess != nil { + in, out := &in.AuditAuthorizationSuccess, &out.AuditAuthorizationSuccess + *out = new(bool) + **out = **in + } + if in.EnableFlowControl != nil { + in, out := &in.EnableFlowControl, &out.EnableFlowControl + *out = new(bool) + **out = **in + } + if in.MinSnapshotHistoryWindowInSeconds != nil { + in, out := &in.MinSnapshotHistoryWindowInSeconds, &out.MinSnapshotHistoryWindowInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SetParameterParameters. +func (in *SetParameterParameters) DeepCopy() *SetParameterParameters { + if in == nil { + return nil + } + out := new(SetParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.AutoOffsetReset != nil { + in, out := &in.AutoOffsetReset, &out.AutoOffsetReset + *out = new(string) + **out = **in + } + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(string) + **out = **in + } + if in.EnableSSLCertificateVerification != nil { + in, out := &in.EnableSSLCertificateVerification, &out.EnableSSLCertificateVerification + *out = new(bool) + **out = **in + } + if in.MaxPollIntervalMs != nil { + in, out := &in.MaxPollIntervalMs, &out.MaxPollIntervalMs + *out = new(float64) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } + if in.SessionTimeoutMs != nil { + in, out := &in.SessionTimeoutMs, &out.SessionTimeoutMs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. 
+func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.AutoOffsetReset != nil { + in, out := &in.AutoOffsetReset, &out.AutoOffsetReset + *out = new(string) + **out = **in + } + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(string) + **out = **in + } + if in.EnableSSLCertificateVerification != nil { + in, out := &in.EnableSSLCertificateVerification, &out.EnableSSLCertificateVerification + *out = new(bool) + **out = **in + } + if in.MaxPollIntervalMs != nil { + in, out := &in.MaxPollIntervalMs, &out.MaxPollIntervalMs + *out = new(float64) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } + if in.SessionTimeoutMs != nil { + in, out := &in.SessionTimeoutMs, &out.SessionTimeoutMs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.AutoOffsetReset != nil { + in, out := &in.AutoOffsetReset, &out.AutoOffsetReset + *out = new(string) + **out = **in + } + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(string) + **out = **in + } + if in.EnableSSLCertificateVerification != nil { + in, out := &in.EnableSSLCertificateVerification, &out.EnableSSLCertificateVerification + *out = new(bool) + **out = **in + } + if in.MaxPollIntervalMs != nil { + in, out := &in.MaxPollIntervalMs, &out.MaxPollIntervalMs + *out = new(float64) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } + if in.SessionTimeoutMs != nil { + in, out := &in.SessionTimeoutMs, &out.SessionTimeoutMs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. +func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardGroupInitParameters) DeepCopyInto(out *ShardGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShardNames != nil { + in, out := &in.ShardNames, &out.ShardNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardGroupInitParameters. +func (in *ShardGroupInitParameters) DeepCopy() *ShardGroupInitParameters { + if in == nil { + return nil + } + out := new(ShardGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardGroupObservation) DeepCopyInto(out *ShardGroupObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShardNames != nil { + in, out := &in.ShardNames, &out.ShardNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardGroupObservation. +func (in *ShardGroupObservation) DeepCopy() *ShardGroupObservation { + if in == nil { + return nil + } + out := new(ShardGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardGroupParameters) DeepCopyInto(out *ShardGroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShardNames != nil { + in, out := &in.ShardNames, &out.ShardNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardGroupParameters. +func (in *ShardGroupParameters) DeepCopy() *ShardGroupParameters { + if in == nil { + return nil + } + out := new(ShardGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardInitParameters) DeepCopyInto(out *ShardInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ShardResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardInitParameters. 
+func (in *ShardInitParameters) DeepCopy() *ShardInitParameters { + if in == nil { + return nil + } + out := new(ShardInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardObservation) DeepCopyInto(out *ShardObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ShardResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardObservation. +func (in *ShardObservation) DeepCopy() *ShardObservation { + if in == nil { + return nil + } + out := new(ShardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardParameters) DeepCopyInto(out *ShardParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ShardResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardParameters. +func (in *ShardParameters) DeepCopy() *ShardParameters { + if in == nil { + return nil + } + out := new(ShardParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardResourcesInitParameters) DeepCopyInto(out *ShardResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardResourcesInitParameters. +func (in *ShardResourcesInitParameters) DeepCopy() *ShardResourcesInitParameters { + if in == nil { + return nil + } + out := new(ShardResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardResourcesObservation) DeepCopyInto(out *ShardResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardResourcesObservation. 
+func (in *ShardResourcesObservation) DeepCopy() *ShardResourcesObservation { + if in == nil { + return nil + } + out := new(ShardResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardResourcesParameters) DeepCopyInto(out *ShardResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardResourcesParameters. +func (in *ShardResourcesParameters) DeepCopy() *ShardResourcesParameters { + if in == nil { + return nil + } + out := new(ShardResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceClusterInitParameters) DeepCopyInto(out *SourceClusterInitParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ExternalCluster != nil { + in, out := &in.ExternalCluster, &out.ExternalCluster + *out = make([]ExternalClusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThisCluster != nil { + in, out := &in.ThisCluster, &out.ThisCluster + *out = make([]ThisClusterInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceClusterInitParameters. +func (in *SourceClusterInitParameters) DeepCopy() *SourceClusterInitParameters { + if in == nil { + return nil + } + out := new(SourceClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceClusterObservation) DeepCopyInto(out *SourceClusterObservation) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ExternalCluster != nil { + in, out := &in.ExternalCluster, &out.ExternalCluster + *out = make([]ExternalClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThisCluster != nil { + in, out := &in.ThisCluster, &out.ThisCluster + *out = make([]ThisClusterParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceClusterObservation. +func (in *SourceClusterObservation) DeepCopy() *SourceClusterObservation { + if in == nil { + return nil + } + out := new(SourceClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceClusterParameters) DeepCopyInto(out *SourceClusterParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ExternalCluster != nil { + in, out := &in.ExternalCluster, &out.ExternalCluster + *out = make([]ExternalClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThisCluster != nil { + in, out := &in.ThisCluster, &out.ThisCluster + *out = make([]ThisClusterParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceClusterParameters. +func (in *SourceClusterParameters) DeepCopy() *SourceClusterParameters { + if in == nil { + return nil + } + out := new(SourceClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverCluster) DeepCopyInto(out *SqlserverCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverCluster. +func (in *SqlserverCluster) DeepCopy() *SqlserverCluster { + if in == nil { + return nil + } + out := new(SqlserverCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SqlserverCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterBackupWindowStartInitParameters) DeepCopyInto(out *SqlserverClusterBackupWindowStartInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterBackupWindowStartInitParameters. +func (in *SqlserverClusterBackupWindowStartInitParameters) DeepCopy() *SqlserverClusterBackupWindowStartInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterBackupWindowStartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterBackupWindowStartObservation) DeepCopyInto(out *SqlserverClusterBackupWindowStartObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterBackupWindowStartObservation. +func (in *SqlserverClusterBackupWindowStartObservation) DeepCopy() *SqlserverClusterBackupWindowStartObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterBackupWindowStartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SqlserverClusterBackupWindowStartParameters) DeepCopyInto(out *SqlserverClusterBackupWindowStartParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterBackupWindowStartParameters. +func (in *SqlserverClusterBackupWindowStartParameters) DeepCopy() *SqlserverClusterBackupWindowStartParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterBackupWindowStartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterDatabaseInitParameters) DeepCopyInto(out *SqlserverClusterDatabaseInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterDatabaseInitParameters. +func (in *SqlserverClusterDatabaseInitParameters) DeepCopy() *SqlserverClusterDatabaseInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterDatabaseObservation) DeepCopyInto(out *SqlserverClusterDatabaseObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterDatabaseObservation. +func (in *SqlserverClusterDatabaseObservation) DeepCopy() *SqlserverClusterDatabaseObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterDatabaseParameters) DeepCopyInto(out *SqlserverClusterDatabaseParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterDatabaseParameters. +func (in *SqlserverClusterDatabaseParameters) DeepCopy() *SqlserverClusterDatabaseParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SqlserverClusterHostInitParameters) DeepCopyInto(out *SqlserverClusterHostInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterHostInitParameters. +func (in *SqlserverClusterHostInitParameters) DeepCopy() *SqlserverClusterHostInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterHostInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterHostObservation) DeepCopyInto(out *SqlserverClusterHostObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterHostObservation. +func (in *SqlserverClusterHostObservation) DeepCopy() *SqlserverClusterHostObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterHostObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterHostParameters) DeepCopyInto(out *SqlserverClusterHostParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterHostParameters. +func (in *SqlserverClusterHostParameters) DeepCopy() *SqlserverClusterHostParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterHostParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SqlserverClusterInitParameters) DeepCopyInto(out *SqlserverClusterInitParameters) { + *out = *in + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]SqlserverClusterBackupWindowStartInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]SqlserverClusterDatabaseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]SqlserverClusterHostInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SqlserverClusterResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sqlcollation != nil { + in, out := &in.Sqlcollation, &out.Sqlcollation + *out = new(string) + **out = **in + } + if in.SqlserverConfig != nil { + in, out := 
&in.SqlserverConfig, &out.SqlserverConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]SqlserverClusterUserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterInitParameters. +func (in *SqlserverClusterInitParameters) DeepCopy() *SqlserverClusterInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterList) DeepCopyInto(out *SqlserverClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SqlserverCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterList. +func (in *SqlserverClusterList) DeepCopy() *SqlserverClusterList { + if in == nil { + return nil + } + out := new(SqlserverClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SqlserverClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
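
The map loops above (Labels, SqlserverConfig) reallocate both the map and every pointed-to string, keeping nil values nil, so neither side can reach the other's entries. The same loop in isolation, as a runnable sketch (copyStringPtrMap is an illustrative helper name, not generated code):

package main

import "fmt"

// copyStringPtrMap reproduces the generated map-copy loop: allocate a new
// map, keep nil values nil, and give every non-nil value its own backing
// string so the two maps share nothing.
func copyStringPtrMap(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		v := *val     // copy the string value
		out[key] = &v // store a pointer to the fresh copy
	}
	return out
}

func main() {
	v := "lz4"
	src := map[string]*string{"compression": &v, "unset": nil}
	dst := copyStringPtrMap(src)
	*dst["compression"] = "zstd"
	fmt.Println(*src["compression"]) // still "lz4"
}

The temporary inVal/outVal pair in the generated version exists only so the shared pointer-copy template can be reused for map values; the effect is identical to the two-line copy shown here.
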
+func (in *SqlserverClusterObservation) DeepCopyInto(out *SqlserverClusterObservation) { + *out = *in + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]SqlserverClusterBackupWindowStartObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]SqlserverClusterDatabaseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Health != nil { + in, out := &in.Health, &out.Health + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]SqlserverClusterHostObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SqlserverClusterResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Sqlcollation != nil { + in, out := &in.Sqlcollation, &out.Sqlcollation + *out = new(string) + **out = **in + } + if in.SqlserverConfig != nil { + in, out := &in.SqlserverConfig, &out.SqlserverConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]SqlserverClusterUserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = 
new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterObservation. +func (in *SqlserverClusterObservation) DeepCopy() *SqlserverClusterObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterParameters) DeepCopyInto(out *SqlserverClusterParameters) { + *out = *in + if in.BackupWindowStart != nil { + in, out := &in.BackupWindowStart, &out.BackupWindowStart + *out = make([]SqlserverClusterBackupWindowStartParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]SqlserverClusterDatabaseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = make([]SqlserverClusterHostParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SqlserverClusterResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIdsRefs != nil { + in, out := &in.SecurityGroupIdsRefs, &out.SecurityGroupIdsRefs 
+ *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIdsSelector != nil { + in, out := &in.SecurityGroupIdsSelector, &out.SecurityGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sqlcollation != nil { + in, out := &in.Sqlcollation, &out.Sqlcollation + *out = new(string) + **out = **in + } + if in.SqlserverConfig != nil { + in, out := &in.SqlserverConfig, &out.SqlserverConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]SqlserverClusterUserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterParameters. +func (in *SqlserverClusterParameters) DeepCopy() *SqlserverClusterParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterResourcesInitParameters) DeepCopyInto(out *SqlserverClusterResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterResourcesInitParameters. +func (in *SqlserverClusterResourcesInitParameters) DeepCopy() *SqlserverClusterResourcesInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterResourcesObservation) DeepCopyInto(out *SqlserverClusterResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterResourcesObservation. +func (in *SqlserverClusterResourcesObservation) DeepCopy() *SqlserverClusterResourcesObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
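
Fields such as Host, Resources, and User are slices of structs that themselves hold pointers, so the generator allocates a new slice and calls DeepCopyInto element by element; the builtin copy would only duplicate the elements shallowly, leaving their pointer fields shared. A sketch of that loop under the same assumption (Resource and deepCopyResources are illustrative names):

package main

import "fmt"

// Resource stands in for a generated element type such as
// SqlserverClusterResourcesParameters: a struct with pointer fields.
type Resource struct {
	DiskSize *float64
}

func (in *Resource) DeepCopyInto(out *Resource) {
	*out = *in
	if in.DiskSize != nil {
		out.DiskSize = new(float64)
		*out.DiskSize = *in.DiskSize
	}
}

// deepCopyResources mirrors the generated slice loop: make() a slice of
// the same length, then deep-copy each element in place.
func deepCopyResources(in []Resource) []Resource {
	if in == nil {
		return nil
	}
	out := make([]Resource, len(in))
	for i := range in {
		in[i].DeepCopyInto(&out[i])
	}
	return out
}

func main() {
	size := 100.0
	src := []Resource{{DiskSize: &size}}
	dst := deepCopyResources(src)
	fmt.Println(src[0].DiskSize != dst[0].DiskSize) // true: no aliasing
}
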
+func (in *SqlserverClusterResourcesParameters) DeepCopyInto(out *SqlserverClusterResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterResourcesParameters. +func (in *SqlserverClusterResourcesParameters) DeepCopy() *SqlserverClusterResourcesParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterSpec) DeepCopyInto(out *SqlserverClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterSpec. +func (in *SqlserverClusterSpec) DeepCopy() *SqlserverClusterSpec { + if in == nil { + return nil + } + out := new(SqlserverClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterStatus) DeepCopyInto(out *SqlserverClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterStatus. +func (in *SqlserverClusterStatus) DeepCopy() *SqlserverClusterStatus { + if in == nil { + return nil + } + out := new(SqlserverClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterUserInitParameters) DeepCopyInto(out *SqlserverClusterUserInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]SqlserverClusterUserPermissionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterUserInitParameters. +func (in *SqlserverClusterUserInitParameters) DeepCopy() *SqlserverClusterUserInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
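
The SqlserverClusterList.DeepCopyObject wrapper shown a little earlier is what lets the list type satisfy apimachinery's runtime.Object. Note its explicit nil check, which prevents returning a typed nil pointer wrapped in a non-nil interface value. A dependency-free sketch of the same relationship (Object and ClusterList stand in for runtime.Object and the generated list type):

package main

import "fmt"

// Object is a stand-in for the relevant slice of runtime.Object:
// anything that can deep-copy itself as an Object.
type Object interface {
	DeepCopyObject() Object
}

type ClusterList struct {
	Items []string
}

func (in *ClusterList) DeepCopy() *ClusterList {
	if in == nil {
		return nil
	}
	return &ClusterList{Items: append([]string(nil), in.Items...)}
}

// DeepCopyObject mirrors the generated wrapper: delegate to DeepCopy and
// return a plain nil interface (not a typed nil) when there is no copy.
func (in *ClusterList) DeepCopyObject() Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

func main() {
	var o Object = &ClusterList{Items: []string{"a"}}
	fmt.Println(o.DeepCopyObject().(*ClusterList).Items) // [a]
}

Without that check, a nil *ClusterList would come back as a non-nil Object whose dynamic value is nil, and callers' o == nil tests would pass it by.
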
+func (in *SqlserverClusterUserObservation) DeepCopyInto(out *SqlserverClusterUserObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]SqlserverClusterUserPermissionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterUserObservation. +func (in *SqlserverClusterUserObservation) DeepCopy() *SqlserverClusterUserObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterUserParameters) DeepCopyInto(out *SqlserverClusterUserParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]SqlserverClusterUserPermissionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterUserParameters. +func (in *SqlserverClusterUserParameters) DeepCopy() *SqlserverClusterUserParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterUserPermissionInitParameters) DeepCopyInto(out *SqlserverClusterUserPermissionInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterUserPermissionInitParameters. +func (in *SqlserverClusterUserPermissionInitParameters) DeepCopy() *SqlserverClusterUserPermissionInitParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterUserPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterUserPermissionObservation) DeepCopyInto(out *SqlserverClusterUserPermissionObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterUserPermissionObservation. 
+func (in *SqlserverClusterUserPermissionObservation) DeepCopy() *SqlserverClusterUserPermissionObservation { + if in == nil { + return nil + } + out := new(SqlserverClusterUserPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqlserverClusterUserPermissionParameters) DeepCopyInto(out *SqlserverClusterUserPermissionParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqlserverClusterUserPermissionParameters. +func (in *SqlserverClusterUserPermissionParameters) DeepCopy() *SqlserverClusterUserPermissionParameters { + if in == nil { + return nil + } + out := new(SqlserverClusterUserPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageInitParameters) DeepCopyInto(out *StorageInitParameters) { + *out = *in + if in.WiredTiger != nil { + in, out := &in.WiredTiger, &out.WiredTiger + *out = make([]WiredTigerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInitParameters. +func (in *StorageInitParameters) DeepCopy() *StorageInitParameters { + if in == nil { + return nil + } + out := new(StorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageObservation) DeepCopyInto(out *StorageObservation) { + *out = *in + if in.WiredTiger != nil { + in, out := &in.WiredTiger, &out.WiredTiger + *out = make([]WiredTigerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageObservation. +func (in *StorageObservation) DeepCopy() *StorageObservation { + if in == nil { + return nil + } + out := new(StorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageParameters) DeepCopyInto(out *StorageParameters) { + *out = *in + if in.WiredTiger != nil { + in, out := &in.WiredTiger, &out.WiredTiger + *out = make([]WiredTigerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageParameters. +func (in *StorageParameters) DeepCopy() *StorageParameters { + if in == nil { + return nil + } + out := new(StorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
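
Roles and AllowHosts are []*string, so the loops above re-allocate the slice and then give each non-nil element its own backing string, preserving nil elements as nil. Isolated as a runnable sketch (copyStringPtrSlice is an illustrative name):

package main

import "fmt"

// copyStringPtrSlice reproduces the generated []*string loop: reallocate
// the slice, deep-copy non-nil elements, keep nil elements nil.
func copyStringPtrSlice(in []*string) []*string {
	if in == nil {
		return nil
	}
	out := make([]*string, len(in))
	for i := range in {
		if in[i] != nil {
			v := *in[i]
			out[i] = &v
		}
	}
	return out
}

func main() {
	owner := "OWNER"
	src := []*string{&owner, nil}
	dst := copyStringPtrSlice(src)
	*dst[0] = "DDLADMIN"
	fmt.Println(*src[0]) // still "OWNER"
}
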
+func (in *StorageWiredTigerInitParameters) DeepCopyInto(out *StorageWiredTigerInitParameters) { + *out = *in + if in.BlockCompressor != nil { + in, out := &in.BlockCompressor, &out.BlockCompressor + *out = new(string) + **out = **in + } + if in.CacheSizeGb != nil { + in, out := &in.CacheSizeGb, &out.CacheSizeGb + *out = new(float64) + **out = **in + } + if in.PrefixCompression != nil { + in, out := &in.PrefixCompression, &out.PrefixCompression + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageWiredTigerInitParameters. +func (in *StorageWiredTigerInitParameters) DeepCopy() *StorageWiredTigerInitParameters { + if in == nil { + return nil + } + out := new(StorageWiredTigerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageWiredTigerObservation) DeepCopyInto(out *StorageWiredTigerObservation) { + *out = *in + if in.BlockCompressor != nil { + in, out := &in.BlockCompressor, &out.BlockCompressor + *out = new(string) + **out = **in + } + if in.CacheSizeGb != nil { + in, out := &in.CacheSizeGb, &out.CacheSizeGb + *out = new(float64) + **out = **in + } + if in.PrefixCompression != nil { + in, out := &in.PrefixCompression, &out.PrefixCompression + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageWiredTigerObservation. +func (in *StorageWiredTigerObservation) DeepCopy() *StorageWiredTigerObservation { + if in == nil { + return nil + } + out := new(StorageWiredTigerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageWiredTigerParameters) DeepCopyInto(out *StorageWiredTigerParameters) { + *out = *in + if in.BlockCompressor != nil { + in, out := &in.BlockCompressor, &out.BlockCompressor + *out = new(string) + **out = **in + } + if in.CacheSizeGb != nil { + in, out := &in.CacheSizeGb, &out.CacheSizeGb + *out = new(float64) + **out = **in + } + if in.PrefixCompression != nil { + in, out := &in.PrefixCompression, &out.PrefixCompression + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageWiredTigerParameters. +func (in *StorageWiredTigerParameters) DeepCopy() *StorageWiredTigerParameters { + if in == nil { + return nil + } + out := new(StorageWiredTigerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetClusterExternalClusterInitParameters) DeepCopyInto(out *TargetClusterExternalClusterInitParameters) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterExternalClusterInitParameters. +func (in *TargetClusterExternalClusterInitParameters) DeepCopy() *TargetClusterExternalClusterInitParameters { + if in == nil { + return nil + } + out := new(TargetClusterExternalClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterExternalClusterObservation) DeepCopyInto(out *TargetClusterExternalClusterObservation) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterExternalClusterObservation. +func (in *TargetClusterExternalClusterObservation) DeepCopy() *TargetClusterExternalClusterObservation { + if in == nil { + return nil + } + out := new(TargetClusterExternalClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterExternalClusterParameters) DeepCopyInto(out *TargetClusterExternalClusterParameters) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.SaslMechanism != nil { + in, out := &in.SaslMechanism, &out.SaslMechanism + *out = new(string) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterExternalClusterParameters. 
+func (in *TargetClusterExternalClusterParameters) DeepCopy() *TargetClusterExternalClusterParameters { + if in == nil { + return nil + } + out := new(TargetClusterExternalClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterInitParameters) DeepCopyInto(out *TargetClusterInitParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ExternalCluster != nil { + in, out := &in.ExternalCluster, &out.ExternalCluster + *out = make([]TargetClusterExternalClusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThisCluster != nil { + in, out := &in.ThisCluster, &out.ThisCluster + *out = make([]TargetClusterThisClusterInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterInitParameters. +func (in *TargetClusterInitParameters) DeepCopy() *TargetClusterInitParameters { + if in == nil { + return nil + } + out := new(TargetClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterObservation) DeepCopyInto(out *TargetClusterObservation) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ExternalCluster != nil { + in, out := &in.ExternalCluster, &out.ExternalCluster + *out = make([]TargetClusterExternalClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThisCluster != nil { + in, out := &in.ThisCluster, &out.ThisCluster + *out = make([]TargetClusterThisClusterParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterObservation. +func (in *TargetClusterObservation) DeepCopy() *TargetClusterObservation { + if in == nil { + return nil + } + out := new(TargetClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterParameters) DeepCopyInto(out *TargetClusterParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ExternalCluster != nil { + in, out := &in.ExternalCluster, &out.ExternalCluster + *out = make([]TargetClusterExternalClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThisCluster != nil { + in, out := &in.ThisCluster, &out.ThisCluster + *out = make([]TargetClusterThisClusterParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterParameters. +func (in *TargetClusterParameters) DeepCopy() *TargetClusterParameters { + if in == nil { + return nil + } + out := new(TargetClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetClusterThisClusterInitParameters) DeepCopyInto(out *TargetClusterThisClusterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterThisClusterInitParameters. +func (in *TargetClusterThisClusterInitParameters) DeepCopy() *TargetClusterThisClusterInitParameters { + if in == nil { + return nil + } + out := new(TargetClusterThisClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterThisClusterObservation) DeepCopyInto(out *TargetClusterThisClusterObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterThisClusterObservation. +func (in *TargetClusterThisClusterObservation) DeepCopy() *TargetClusterThisClusterObservation { + if in == nil { + return nil + } + out := new(TargetClusterThisClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetClusterThisClusterParameters) DeepCopyInto(out *TargetClusterThisClusterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetClusterThisClusterParameters. +func (in *TargetClusterThisClusterParameters) DeepCopy() *TargetClusterThisClusterParameters { + if in == nil { + return nil + } + out := new(TargetClusterThisClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThisClusterInitParameters) DeepCopyInto(out *ThisClusterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThisClusterInitParameters. +func (in *ThisClusterInitParameters) DeepCopy() *ThisClusterInitParameters { + if in == nil { + return nil + } + out := new(ThisClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThisClusterObservation) DeepCopyInto(out *ThisClusterObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThisClusterObservation. +func (in *ThisClusterObservation) DeepCopy() *ThisClusterObservation { + if in == nil { + return nil + } + out := new(ThisClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThisClusterParameters) DeepCopyInto(out *ThisClusterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThisClusterParameters. +func (in *ThisClusterParameters) DeepCopy() *ThisClusterParameters { + if in == nil { + return nil + } + out := new(ThisClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
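
The ThisCluster block types carry no fields, so their DeepCopyInto collapses to a single struct assignment, and the TargetCluster functions above can duplicate slices of them with the builtin copy: with no pointer fields there is nothing left to alias. A small sketch of why that shortcut is safe:

package main

import "fmt"

// ThisCluster mirrors the empty marker blocks above: no fields, so value
// assignment is already a complete deep copy.
type ThisCluster struct{}

func main() {
	in := []ThisCluster{{}, {}}
	// The generated code uses copy() here instead of an element-wise
	// DeepCopyInto loop: the elements contain nothing that could alias.
	out := make([]ThisCluster, len(in))
	n := copy(out, in)
	fmt.Println(n, len(out)) // 2 2
}
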
+func (in *TopicConfigInitParameters) DeepCopyInto(out *TopicConfigInitParameters) { + *out = *in + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.DeleteRetentionMs != nil { + in, out := &in.DeleteRetentionMs, &out.DeleteRetentionMs + *out = new(string) + **out = **in + } + if in.FileDeleteDelayMs != nil { + in, out := &in.FileDeleteDelayMs, &out.FileDeleteDelayMs + *out = new(string) + **out = **in + } + if in.FlushMessages != nil { + in, out := &in.FlushMessages, &out.FlushMessages + *out = new(string) + **out = **in + } + if in.FlushMs != nil { + in, out := &in.FlushMs, &out.FlushMs + *out = new(string) + **out = **in + } + if in.MaxMessageBytes != nil { + in, out := &in.MaxMessageBytes, &out.MaxMessageBytes + *out = new(string) + **out = **in + } + if in.MinCompactionLagMs != nil { + in, out := &in.MinCompactionLagMs, &out.MinCompactionLagMs + *out = new(string) + **out = **in + } + if in.MinInsyncReplicas != nil { + in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas + *out = new(string) + **out = **in + } + if in.Preallocate != nil { + in, out := &in.Preallocate, &out.Preallocate + *out = new(bool) + **out = **in + } + if in.RetentionBytes != nil { + in, out := &in.RetentionBytes, &out.RetentionBytes + *out = new(string) + **out = **in + } + if in.RetentionMs != nil { + in, out := &in.RetentionMs, &out.RetentionMs + *out = new(string) + **out = **in + } + if in.SegmentBytes != nil { + in, out := &in.SegmentBytes, &out.SegmentBytes + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicConfigInitParameters. +func (in *TopicConfigInitParameters) DeepCopy() *TopicConfigInitParameters { + if in == nil { + return nil + } + out := new(TopicConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicConfigObservation) DeepCopyInto(out *TopicConfigObservation) { + *out = *in + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.DeleteRetentionMs != nil { + in, out := &in.DeleteRetentionMs, &out.DeleteRetentionMs + *out = new(string) + **out = **in + } + if in.FileDeleteDelayMs != nil { + in, out := &in.FileDeleteDelayMs, &out.FileDeleteDelayMs + *out = new(string) + **out = **in + } + if in.FlushMessages != nil { + in, out := &in.FlushMessages, &out.FlushMessages + *out = new(string) + **out = **in + } + if in.FlushMs != nil { + in, out := &in.FlushMs, &out.FlushMs + *out = new(string) + **out = **in + } + if in.MaxMessageBytes != nil { + in, out := &in.MaxMessageBytes, &out.MaxMessageBytes + *out = new(string) + **out = **in + } + if in.MinCompactionLagMs != nil { + in, out := &in.MinCompactionLagMs, &out.MinCompactionLagMs + *out = new(string) + **out = **in + } + if in.MinInsyncReplicas != nil { + in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas + *out = new(string) + **out = **in + } + if in.Preallocate != nil { + in, out := &in.Preallocate, &out.Preallocate + *out = new(bool) + **out = **in + } + if in.RetentionBytes != nil { + in, out := &in.RetentionBytes, &out.RetentionBytes + *out = new(string) + **out = **in + } + if in.RetentionMs != nil { + in, out := &in.RetentionMs, &out.RetentionMs + *out = new(string) + **out = **in + } + if in.SegmentBytes != nil { + in, out := &in.SegmentBytes, &out.SegmentBytes + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicConfigObservation. +func (in *TopicConfigObservation) DeepCopy() *TopicConfigObservation { + if in == nil { + return nil + } + out := new(TopicConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicConfigParameters) DeepCopyInto(out *TopicConfigParameters) { + *out = *in + if in.CleanupPolicy != nil { + in, out := &in.CleanupPolicy, &out.CleanupPolicy + *out = new(string) + **out = **in + } + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.DeleteRetentionMs != nil { + in, out := &in.DeleteRetentionMs, &out.DeleteRetentionMs + *out = new(string) + **out = **in + } + if in.FileDeleteDelayMs != nil { + in, out := &in.FileDeleteDelayMs, &out.FileDeleteDelayMs + *out = new(string) + **out = **in + } + if in.FlushMessages != nil { + in, out := &in.FlushMessages, &out.FlushMessages + *out = new(string) + **out = **in + } + if in.FlushMs != nil { + in, out := &in.FlushMs, &out.FlushMs + *out = new(string) + **out = **in + } + if in.MaxMessageBytes != nil { + in, out := &in.MaxMessageBytes, &out.MaxMessageBytes + *out = new(string) + **out = **in + } + if in.MinCompactionLagMs != nil { + in, out := &in.MinCompactionLagMs, &out.MinCompactionLagMs + *out = new(string) + **out = **in + } + if in.MinInsyncReplicas != nil { + in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas + *out = new(string) + **out = **in + } + if in.Preallocate != nil { + in, out := &in.Preallocate, &out.Preallocate + *out = new(bool) + **out = **in + } + if in.RetentionBytes != nil { + in, out := &in.RetentionBytes, &out.RetentionBytes + *out = new(string) + **out = **in + } + if in.RetentionMs != nil { + in, out := &in.RetentionMs, &out.RetentionMs + *out = new(string) + **out = **in + } + if in.SegmentBytes != nil { + in, out := &in.SegmentBytes, &out.SegmentBytes + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicConfigParameters. +func (in *TopicConfigParameters) DeepCopy() *TopicConfigParameters { + if in == nil { + return nil + } + out := new(TopicConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInitParameters) DeepCopyInto(out *TopicInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(float64) + **out = **in + } + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.TopicConfig != nil { + in, out := &in.TopicConfig, &out.TopicConfig + *out = make([]TopicConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInitParameters. +func (in *TopicInitParameters) DeepCopy() *TopicInitParameters { + if in == nil { + return nil + } + out := new(TopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicObservation) DeepCopyInto(out *TopicObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(float64) + **out = **in + } + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.TopicConfig != nil { + in, out := &in.TopicConfig, &out.TopicConfig + *out = make([]TopicConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicObservation. +func (in *TopicObservation) DeepCopy() *TopicObservation { + if in == nil { + return nil + } + out := new(TopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicParameters) DeepCopyInto(out *TopicParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(float64) + **out = **in + } + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.TopicConfig != nil { + in, out := &in.TopicConfig, &out.TopicConfig + *out = make([]TopicConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicParameters. +func (in *TopicParameters) DeepCopy() *TopicParameters { + if in == nil { + return nil + } + out := new(TopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PermissionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = make([]QuotaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]UserSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PermissionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = make([]QuotaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]UserSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = make([]PermissionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = make([]QuotaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]UserSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPermissionInitParameters) DeepCopyInto(out *UserPermissionInitParameters) { + *out = *in + if in.AllowHosts != nil { + in, out := &in.AllowHosts, &out.AllowHosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPermissionInitParameters. +func (in *UserPermissionInitParameters) DeepCopy() *UserPermissionInitParameters { + if in == nil { + return nil + } + out := new(UserPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserPermissionObservation) DeepCopyInto(out *UserPermissionObservation) { + *out = *in + if in.AllowHosts != nil { + in, out := &in.AllowHosts, &out.AllowHosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPermissionObservation. +func (in *UserPermissionObservation) DeepCopy() *UserPermissionObservation { + if in == nil { + return nil + } + out := new(UserPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPermissionParameters) DeepCopyInto(out *UserPermissionParameters) { + *out = *in + if in.AllowHosts != nil { + in, out := &in.AllowHosts, &out.AllowHosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPermissionParameters. +func (in *UserPermissionParameters) DeepCopy() *UserPermissionParameters { + if in == nil { + return nil + } + out := new(UserPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
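
In practice these methods exist so controllers can clone API objects before mutating them, for example objects handed out by a shared informer cache, which must be treated as read-only. A hedged usage sketch (Settings, Fetch, and the shared variable are all illustrative, not part of this provider):

package main

import "fmt"

// Settings stands in for one of the generated spec types.
type Settings struct {
	MaxBlockSize *float64
}

func (in *Settings) DeepCopyInto(out *Settings) {
	*out = *in
	if in.MaxBlockSize != nil {
		out.MaxBlockSize = new(float64)
		*out.MaxBlockSize = *in.MaxBlockSize
	}
}

// DeepCopy follows the generated wrapper shape: nil receiver in, nil out.
func (in *Settings) DeepCopy() *Settings {
	if in == nil {
		return nil
	}
	out := new(Settings)
	in.DeepCopyInto(out)
	return out
}

// shared plays the role of an object held in a cache shared between callers.
var shared = func() *Settings { v := 65536.0; return &Settings{MaxBlockSize: &v} }()

// Fetch plays the role of a cache read that returns the shared instance.
func Fetch() *Settings { return shared }

func main() {
	s := Fetch().DeepCopy() // clone before mutating
	*s.MaxBlockSize = 131072
	fmt.Println(*Fetch().MaxBlockSize) // 65536: the shared object is untouched
}
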
+func (in *UserSettingsInitParameters) DeepCopyInto(out *UserSettingsInitParameters) { + *out = *in + if in.AddHTTPCorsHeader != nil { + in, out := &in.AddHTTPCorsHeader, &out.AddHTTPCorsHeader + *out = new(bool) + **out = **in + } + if in.AllowDdl != nil { + in, out := &in.AllowDdl, &out.AllowDdl + *out = new(bool) + **out = **in + } + if in.AllowIntrospectionFunctions != nil { + in, out := &in.AllowIntrospectionFunctions, &out.AllowIntrospectionFunctions + *out = new(bool) + **out = **in + } + if in.AllowSuspiciousLowCardinalityTypes != nil { + in, out := &in.AllowSuspiciousLowCardinalityTypes, &out.AllowSuspiciousLowCardinalityTypes + *out = new(bool) + **out = **in + } + if in.AnyJoinDistinctRightTableKeys != nil { + in, out := &in.AnyJoinDistinctRightTableKeys, &out.AnyJoinDistinctRightTableKeys + *out = new(bool) + **out = **in + } + if in.AsyncInsert != nil { + in, out := &in.AsyncInsert, &out.AsyncInsert + *out = new(bool) + **out = **in + } + if in.AsyncInsertBusyTimeout != nil { + in, out := &in.AsyncInsertBusyTimeout, &out.AsyncInsertBusyTimeout + *out = new(float64) + **out = **in + } + if in.AsyncInsertMaxDataSize != nil { + in, out := &in.AsyncInsertMaxDataSize, &out.AsyncInsertMaxDataSize + *out = new(float64) + **out = **in + } + if in.AsyncInsertStaleTimeout != nil { + in, out := &in.AsyncInsertStaleTimeout, &out.AsyncInsertStaleTimeout + *out = new(float64) + **out = **in + } + if in.AsyncInsertThreads != nil { + in, out := &in.AsyncInsertThreads, &out.AsyncInsertThreads + *out = new(float64) + **out = **in + } + if in.CancelHTTPReadonlyQueriesOnClientClose != nil { + in, out := &in.CancelHTTPReadonlyQueriesOnClientClose, &out.CancelHTTPReadonlyQueriesOnClientClose + *out = new(bool) + **out = **in + } + if in.Compile != nil { + in, out := &in.Compile, &out.Compile + *out = new(bool) + **out = **in + } + if in.CompileExpressions != nil { + in, out := &in.CompileExpressions, &out.CompileExpressions + *out = new(bool) + **out = **in + } + if in.ConnectTimeout != nil { + in, out := &in.ConnectTimeout, &out.ConnectTimeout + *out = new(float64) + **out = **in + } + if in.ConnectTimeoutWithFailover != nil { + in, out := &in.ConnectTimeoutWithFailover, &out.ConnectTimeoutWithFailover + *out = new(float64) + **out = **in + } + if in.CountDistinctImplementation != nil { + in, out := &in.CountDistinctImplementation, &out.CountDistinctImplementation + *out = new(string) + **out = **in + } + if in.DateTimeInputFormat != nil { + in, out := &in.DateTimeInputFormat, &out.DateTimeInputFormat + *out = new(string) + **out = **in + } + if in.DateTimeOutputFormat != nil { + in, out := &in.DateTimeOutputFormat, &out.DateTimeOutputFormat + *out = new(string) + **out = **in + } + if in.DeduplicateBlocksInDependentMaterializedViews != nil { + in, out := &in.DeduplicateBlocksInDependentMaterializedViews, &out.DeduplicateBlocksInDependentMaterializedViews + *out = new(bool) + **out = **in + } + if in.DistinctOverflowMode != nil { + in, out := &in.DistinctOverflowMode, &out.DistinctOverflowMode + *out = new(string) + **out = **in + } + if in.DistributedAggregationMemoryEfficient != nil { + in, out := &in.DistributedAggregationMemoryEfficient, &out.DistributedAggregationMemoryEfficient + *out = new(bool) + **out = **in + } + if in.DistributedDdlTaskTimeout != nil { + in, out := &in.DistributedDdlTaskTimeout, &out.DistributedDdlTaskTimeout + *out = new(float64) + **out = **in + } + if in.DistributedProductMode != nil { + in, out := &in.DistributedProductMode, &out.DistributedProductMode + *out = 
new(string) + **out = **in + } + if in.EmptyResultForAggregationByEmptySet != nil { + in, out := &in.EmptyResultForAggregationByEmptySet, &out.EmptyResultForAggregationByEmptySet + *out = new(bool) + **out = **in + } + if in.EnableHTTPCompression != nil { + in, out := &in.EnableHTTPCompression, &out.EnableHTTPCompression + *out = new(bool) + **out = **in + } + if in.FallbackToStaleReplicasForDistributedQueries != nil { + in, out := &in.FallbackToStaleReplicasForDistributedQueries, &out.FallbackToStaleReplicasForDistributedQueries + *out = new(bool) + **out = **in + } + if in.FlattenNested != nil { + in, out := &in.FlattenNested, &out.FlattenNested + *out = new(bool) + **out = **in + } + if in.ForceIndexByDate != nil { + in, out := &in.ForceIndexByDate, &out.ForceIndexByDate + *out = new(bool) + **out = **in + } + if in.ForcePrimaryKey != nil { + in, out := &in.ForcePrimaryKey, &out.ForcePrimaryKey + *out = new(bool) + **out = **in + } + if in.FormatRegexp != nil { + in, out := &in.FormatRegexp, &out.FormatRegexp + *out = new(string) + **out = **in + } + if in.FormatRegexpSkipUnmatched != nil { + in, out := &in.FormatRegexpSkipUnmatched, &out.FormatRegexpSkipUnmatched + *out = new(bool) + **out = **in + } + if in.GroupByOverflowMode != nil { + in, out := &in.GroupByOverflowMode, &out.GroupByOverflowMode + *out = new(string) + **out = **in + } + if in.GroupByTwoLevelThreshold != nil { + in, out := &in.GroupByTwoLevelThreshold, &out.GroupByTwoLevelThreshold + *out = new(float64) + **out = **in + } + if in.GroupByTwoLevelThresholdBytes != nil { + in, out := &in.GroupByTwoLevelThresholdBytes, &out.GroupByTwoLevelThresholdBytes + *out = new(float64) + **out = **in + } + if in.HTTPConnectionTimeout != nil { + in, out := &in.HTTPConnectionTimeout, &out.HTTPConnectionTimeout + *out = new(float64) + **out = **in + } + if in.HTTPHeadersProgressInterval != nil { + in, out := &in.HTTPHeadersProgressInterval, &out.HTTPHeadersProgressInterval + *out = new(float64) + **out = **in + } + if in.HTTPReceiveTimeout != nil { + in, out := &in.HTTPReceiveTimeout, &out.HTTPReceiveTimeout + *out = new(float64) + **out = **in + } + if in.HTTPSendTimeout != nil { + in, out := &in.HTTPSendTimeout, &out.HTTPSendTimeout + *out = new(float64) + **out = **in + } + if in.HedgedConnectionTimeoutMs != nil { + in, out := &in.HedgedConnectionTimeoutMs, &out.HedgedConnectionTimeoutMs + *out = new(float64) + **out = **in + } + if in.IdleConnectionTimeout != nil { + in, out := &in.IdleConnectionTimeout, &out.IdleConnectionTimeout + *out = new(float64) + **out = **in + } + if in.InputFormatDefaultsForOmittedFields != nil { + in, out := &in.InputFormatDefaultsForOmittedFields, &out.InputFormatDefaultsForOmittedFields + *out = new(bool) + **out = **in + } + if in.InputFormatImportNestedJSON != nil { + in, out := &in.InputFormatImportNestedJSON, &out.InputFormatImportNestedJSON + *out = new(bool) + **out = **in + } + if in.InputFormatNullAsDefault != nil { + in, out := &in.InputFormatNullAsDefault, &out.InputFormatNullAsDefault + *out = new(bool) + **out = **in + } + if in.InputFormatParallelParsing != nil { + in, out := &in.InputFormatParallelParsing, &out.InputFormatParallelParsing + *out = new(bool) + **out = **in + } + if in.InputFormatValuesInterpretExpressions != nil { + in, out := &in.InputFormatValuesInterpretExpressions, &out.InputFormatValuesInterpretExpressions + *out = new(bool) + **out = **in + } + if in.InputFormatWithNamesUseHeader != nil { + in, out := &in.InputFormatWithNamesUseHeader, 
&out.InputFormatWithNamesUseHeader + *out = new(bool) + **out = **in + } + if in.InsertKeeperMaxRetries != nil { + in, out := &in.InsertKeeperMaxRetries, &out.InsertKeeperMaxRetries + *out = new(float64) + **out = **in + } + if in.InsertNullAsDefault != nil { + in, out := &in.InsertNullAsDefault, &out.InsertNullAsDefault + *out = new(bool) + **out = **in + } + if in.InsertQuorum != nil { + in, out := &in.InsertQuorum, &out.InsertQuorum + *out = new(float64) + **out = **in + } + if in.InsertQuorumParallel != nil { + in, out := &in.InsertQuorumParallel, &out.InsertQuorumParallel + *out = new(bool) + **out = **in + } + if in.InsertQuorumTimeout != nil { + in, out := &in.InsertQuorumTimeout, &out.InsertQuorumTimeout + *out = new(float64) + **out = **in + } + if in.JoinAlgorithm != nil { + in, out := &in.JoinAlgorithm, &out.JoinAlgorithm + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JoinOverflowMode != nil { + in, out := &in.JoinOverflowMode, &out.JoinOverflowMode + *out = new(string) + **out = **in + } + if in.JoinUseNulls != nil { + in, out := &in.JoinUseNulls, &out.JoinUseNulls + *out = new(bool) + **out = **in + } + if in.JoinedSubqueryRequiresAlias != nil { + in, out := &in.JoinedSubqueryRequiresAlias, &out.JoinedSubqueryRequiresAlias + *out = new(bool) + **out = **in + } + if in.LoadBalancing != nil { + in, out := &in.LoadBalancing, &out.LoadBalancing + *out = new(string) + **out = **in + } + if in.LocalFilesystemReadMethod != nil { + in, out := &in.LocalFilesystemReadMethod, &out.LocalFilesystemReadMethod + *out = new(string) + **out = **in + } + if in.LogQueryThreads != nil { + in, out := &in.LogQueryThreads, &out.LogQueryThreads + *out = new(bool) + **out = **in + } + if in.LowCardinalityAllowInNativeFormat != nil { + in, out := &in.LowCardinalityAllowInNativeFormat, &out.LowCardinalityAllowInNativeFormat + *out = new(bool) + **out = **in + } + if in.MaxAstDepth != nil { + in, out := &in.MaxAstDepth, &out.MaxAstDepth + *out = new(float64) + **out = **in + } + if in.MaxAstElements != nil { + in, out := &in.MaxAstElements, &out.MaxAstElements + *out = new(float64) + **out = **in + } + if in.MaxBlockSize != nil { + in, out := &in.MaxBlockSize, &out.MaxBlockSize + *out = new(float64) + **out = **in + } + if in.MaxBytesBeforeExternalGroupBy != nil { + in, out := &in.MaxBytesBeforeExternalGroupBy, &out.MaxBytesBeforeExternalGroupBy + *out = new(float64) + **out = **in + } + if in.MaxBytesBeforeExternalSort != nil { + in, out := &in.MaxBytesBeforeExternalSort, &out.MaxBytesBeforeExternalSort + *out = new(float64) + **out = **in + } + if in.MaxBytesInDistinct != nil { + in, out := &in.MaxBytesInDistinct, &out.MaxBytesInDistinct + *out = new(float64) + **out = **in + } + if in.MaxBytesInJoin != nil { + in, out := &in.MaxBytesInJoin, &out.MaxBytesInJoin + *out = new(float64) + **out = **in + } + if in.MaxBytesInSet != nil { + in, out := &in.MaxBytesInSet, &out.MaxBytesInSet + *out = new(float64) + **out = **in + } + if in.MaxBytesToRead != nil { + in, out := &in.MaxBytesToRead, &out.MaxBytesToRead + *out = new(float64) + **out = **in + } + if in.MaxBytesToSort != nil { + in, out := &in.MaxBytesToSort, &out.MaxBytesToSort + *out = new(float64) + **out = **in + } + if in.MaxBytesToTransfer != nil { + in, out := &in.MaxBytesToTransfer, &out.MaxBytesToTransfer + *out = new(float64) + **out = **in + } + if in.MaxColumnsToRead != nil { + in, out := &in.MaxColumnsToRead, 
&out.MaxColumnsToRead + *out = new(float64) + **out = **in + } + if in.MaxConcurrentQueriesForUser != nil { + in, out := &in.MaxConcurrentQueriesForUser, &out.MaxConcurrentQueriesForUser + *out = new(float64) + **out = **in + } + if in.MaxExecutionTime != nil { + in, out := &in.MaxExecutionTime, &out.MaxExecutionTime + *out = new(float64) + **out = **in + } + if in.MaxExpandedAstElements != nil { + in, out := &in.MaxExpandedAstElements, &out.MaxExpandedAstElements + *out = new(float64) + **out = **in + } + if in.MaxFinalThreads != nil { + in, out := &in.MaxFinalThreads, &out.MaxFinalThreads + *out = new(float64) + **out = **in + } + if in.MaxHTTPGetRedirects != nil { + in, out := &in.MaxHTTPGetRedirects, &out.MaxHTTPGetRedirects + *out = new(float64) + **out = **in + } + if in.MaxInsertBlockSize != nil { + in, out := &in.MaxInsertBlockSize, &out.MaxInsertBlockSize + *out = new(float64) + **out = **in + } + if in.MaxInsertThreads != nil { + in, out := &in.MaxInsertThreads, &out.MaxInsertThreads + *out = new(float64) + **out = **in + } + if in.MaxMemoryUsage != nil { + in, out := &in.MaxMemoryUsage, &out.MaxMemoryUsage + *out = new(float64) + **out = **in + } + if in.MaxMemoryUsageForUser != nil { + in, out := &in.MaxMemoryUsageForUser, &out.MaxMemoryUsageForUser + *out = new(float64) + **out = **in + } + if in.MaxNetworkBandwidth != nil { + in, out := &in.MaxNetworkBandwidth, &out.MaxNetworkBandwidth + *out = new(float64) + **out = **in + } + if in.MaxNetworkBandwidthForUser != nil { + in, out := &in.MaxNetworkBandwidthForUser, &out.MaxNetworkBandwidthForUser + *out = new(float64) + **out = **in + } + if in.MaxParserDepth != nil { + in, out := &in.MaxParserDepth, &out.MaxParserDepth + *out = new(float64) + **out = **in + } + if in.MaxQuerySize != nil { + in, out := &in.MaxQuerySize, &out.MaxQuerySize + *out = new(float64) + **out = **in + } + if in.MaxReadBufferSize != nil { + in, out := &in.MaxReadBufferSize, &out.MaxReadBufferSize + *out = new(float64) + **out = **in + } + if in.MaxReplicaDelayForDistributedQueries != nil { + in, out := &in.MaxReplicaDelayForDistributedQueries, &out.MaxReplicaDelayForDistributedQueries + *out = new(float64) + **out = **in + } + if in.MaxResultBytes != nil { + in, out := &in.MaxResultBytes, &out.MaxResultBytes + *out = new(float64) + **out = **in + } + if in.MaxResultRows != nil { + in, out := &in.MaxResultRows, &out.MaxResultRows + *out = new(float64) + **out = **in + } + if in.MaxRowsInDistinct != nil { + in, out := &in.MaxRowsInDistinct, &out.MaxRowsInDistinct + *out = new(float64) + **out = **in + } + if in.MaxRowsInJoin != nil { + in, out := &in.MaxRowsInJoin, &out.MaxRowsInJoin + *out = new(float64) + **out = **in + } + if in.MaxRowsInSet != nil { + in, out := &in.MaxRowsInSet, &out.MaxRowsInSet + *out = new(float64) + **out = **in + } + if in.MaxRowsToGroupBy != nil { + in, out := &in.MaxRowsToGroupBy, &out.MaxRowsToGroupBy + *out = new(float64) + **out = **in + } + if in.MaxRowsToRead != nil { + in, out := &in.MaxRowsToRead, &out.MaxRowsToRead + *out = new(float64) + **out = **in + } + if in.MaxRowsToSort != nil { + in, out := &in.MaxRowsToSort, &out.MaxRowsToSort + *out = new(float64) + **out = **in + } + if in.MaxRowsToTransfer != nil { + in, out := &in.MaxRowsToTransfer, &out.MaxRowsToTransfer + *out = new(float64) + **out = **in + } + if in.MaxTemporaryColumns != nil { + in, out := &in.MaxTemporaryColumns, &out.MaxTemporaryColumns + *out = new(float64) + **out = **in + } + if in.MaxTemporaryDataOnDiskSizeForQuery != nil { + in, out := 
&in.MaxTemporaryDataOnDiskSizeForQuery, &out.MaxTemporaryDataOnDiskSizeForQuery + *out = new(float64) + **out = **in + } + if in.MaxTemporaryDataOnDiskSizeForUser != nil { + in, out := &in.MaxTemporaryDataOnDiskSizeForUser, &out.MaxTemporaryDataOnDiskSizeForUser + *out = new(float64) + **out = **in + } + if in.MaxTemporaryNonConstColumns != nil { + in, out := &in.MaxTemporaryNonConstColumns, &out.MaxTemporaryNonConstColumns + *out = new(float64) + **out = **in + } + if in.MaxThreads != nil { + in, out := &in.MaxThreads, &out.MaxThreads + *out = new(float64) + **out = **in + } + if in.MemoryOvercommitRatioDenominator != nil { + in, out := &in.MemoryOvercommitRatioDenominator, &out.MemoryOvercommitRatioDenominator + *out = new(float64) + **out = **in + } + if in.MemoryOvercommitRatioDenominatorForUser != nil { + in, out := &in.MemoryOvercommitRatioDenominatorForUser, &out.MemoryOvercommitRatioDenominatorForUser + *out = new(float64) + **out = **in + } + if in.MemoryProfilerSampleProbability != nil { + in, out := &in.MemoryProfilerSampleProbability, &out.MemoryProfilerSampleProbability + *out = new(float64) + **out = **in + } + if in.MemoryProfilerStep != nil { + in, out := &in.MemoryProfilerStep, &out.MemoryProfilerStep + *out = new(float64) + **out = **in + } + if in.MemoryUsageOvercommitMaxWaitMicroseconds != nil { + in, out := &in.MemoryUsageOvercommitMaxWaitMicroseconds, &out.MemoryUsageOvercommitMaxWaitMicroseconds + *out = new(float64) + **out = **in + } + if in.MergeTreeMaxBytesToUseCache != nil { + in, out := &in.MergeTreeMaxBytesToUseCache, &out.MergeTreeMaxBytesToUseCache + *out = new(float64) + **out = **in + } + if in.MergeTreeMaxRowsToUseCache != nil { + in, out := &in.MergeTreeMaxRowsToUseCache, &out.MergeTreeMaxRowsToUseCache + *out = new(float64) + **out = **in + } + if in.MergeTreeMinBytesForConcurrentRead != nil { + in, out := &in.MergeTreeMinBytesForConcurrentRead, &out.MergeTreeMinBytesForConcurrentRead + *out = new(float64) + **out = **in + } + if in.MergeTreeMinRowsForConcurrentRead != nil { + in, out := &in.MergeTreeMinRowsForConcurrentRead, &out.MergeTreeMinRowsForConcurrentRead + *out = new(float64) + **out = **in + } + if in.MinBytesToUseDirectIo != nil { + in, out := &in.MinBytesToUseDirectIo, &out.MinBytesToUseDirectIo + *out = new(float64) + **out = **in + } + if in.MinCountToCompile != nil { + in, out := &in.MinCountToCompile, &out.MinCountToCompile + *out = new(float64) + **out = **in + } + if in.MinCountToCompileExpression != nil { + in, out := &in.MinCountToCompileExpression, &out.MinCountToCompileExpression + *out = new(float64) + **out = **in + } + if in.MinExecutionSpeed != nil { + in, out := &in.MinExecutionSpeed, &out.MinExecutionSpeed + *out = new(float64) + **out = **in + } + if in.MinExecutionSpeedBytes != nil { + in, out := &in.MinExecutionSpeedBytes, &out.MinExecutionSpeedBytes + *out = new(float64) + **out = **in + } + if in.MinInsertBlockSizeBytes != nil { + in, out := &in.MinInsertBlockSizeBytes, &out.MinInsertBlockSizeBytes + *out = new(float64) + **out = **in + } + if in.MinInsertBlockSizeRows != nil { + in, out := &in.MinInsertBlockSizeRows, &out.MinInsertBlockSizeRows + *out = new(float64) + **out = **in + } + if in.OutputFormatJSONQuote64BitIntegers != nil { + in, out := &in.OutputFormatJSONQuote64BitIntegers, &out.OutputFormatJSONQuote64BitIntegers + *out = new(bool) + **out = **in + } + if in.OutputFormatJSONQuoteDenormals != nil { + in, out := &in.OutputFormatJSONQuoteDenormals, &out.OutputFormatJSONQuoteDenormals + *out = new(bool) + 
**out = **in + } + if in.PreferLocalhostReplica != nil { + in, out := &in.PreferLocalhostReplica, &out.PreferLocalhostReplica + *out = new(bool) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.QuotaMode != nil { + in, out := &in.QuotaMode, &out.QuotaMode + *out = new(string) + **out = **in + } + if in.ReadOverflowMode != nil { + in, out := &in.ReadOverflowMode, &out.ReadOverflowMode + *out = new(string) + **out = **in + } + if in.Readonly != nil { + in, out := &in.Readonly, &out.Readonly + *out = new(float64) + **out = **in + } + if in.ReceiveTimeout != nil { + in, out := &in.ReceiveTimeout, &out.ReceiveTimeout + *out = new(float64) + **out = **in + } + if in.RemoteFilesystemReadMethod != nil { + in, out := &in.RemoteFilesystemReadMethod, &out.RemoteFilesystemReadMethod + *out = new(string) + **out = **in + } + if in.ReplicationAlterPartitionsSync != nil { + in, out := &in.ReplicationAlterPartitionsSync, &out.ReplicationAlterPartitionsSync + *out = new(float64) + **out = **in + } + if in.ResultOverflowMode != nil { + in, out := &in.ResultOverflowMode, &out.ResultOverflowMode + *out = new(string) + **out = **in + } + if in.SelectSequentialConsistency != nil { + in, out := &in.SelectSequentialConsistency, &out.SelectSequentialConsistency + *out = new(bool) + **out = **in + } + if in.SendProgressInHTTPHeaders != nil { + in, out := &in.SendProgressInHTTPHeaders, &out.SendProgressInHTTPHeaders + *out = new(bool) + **out = **in + } + if in.SendTimeout != nil { + in, out := &in.SendTimeout, &out.SendTimeout + *out = new(float64) + **out = **in + } + if in.SetOverflowMode != nil { + in, out := &in.SetOverflowMode, &out.SetOverflowMode + *out = new(string) + **out = **in + } + if in.SkipUnavailableShards != nil { + in, out := &in.SkipUnavailableShards, &out.SkipUnavailableShards + *out = new(bool) + **out = **in + } + if in.SortOverflowMode != nil { + in, out := &in.SortOverflowMode, &out.SortOverflowMode + *out = new(string) + **out = **in + } + if in.TimeoutBeforeCheckingExecutionSpeed != nil { + in, out := &in.TimeoutBeforeCheckingExecutionSpeed, &out.TimeoutBeforeCheckingExecutionSpeed + *out = new(float64) + **out = **in + } + if in.TimeoutOverflowMode != nil { + in, out := &in.TimeoutOverflowMode, &out.TimeoutOverflowMode + *out = new(string) + **out = **in + } + if in.TransferOverflowMode != nil { + in, out := &in.TransferOverflowMode, &out.TransferOverflowMode + *out = new(string) + **out = **in + } + if in.TransformNullIn != nil { + in, out := &in.TransformNullIn, &out.TransformNullIn + *out = new(bool) + **out = **in + } + if in.UseHedgedRequests != nil { + in, out := &in.UseHedgedRequests, &out.UseHedgedRequests + *out = new(bool) + **out = **in + } + if in.UseUncompressedCache != nil { + in, out := &in.UseUncompressedCache, &out.UseUncompressedCache + *out = new(bool) + **out = **in + } + if in.WaitForAsyncInsert != nil { + in, out := &in.WaitForAsyncInsert, &out.WaitForAsyncInsert + *out = new(bool) + **out = **in + } + if in.WaitForAsyncInsertTimeout != nil { + in, out := &in.WaitForAsyncInsertTimeout, &out.WaitForAsyncInsertTimeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsInitParameters. 
+func (in *UserSettingsInitParameters) DeepCopy() *UserSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsObservation) DeepCopyInto(out *UserSettingsObservation) { + *out = *in + if in.AddHTTPCorsHeader != nil { + in, out := &in.AddHTTPCorsHeader, &out.AddHTTPCorsHeader + *out = new(bool) + **out = **in + } + if in.AllowDdl != nil { + in, out := &in.AllowDdl, &out.AllowDdl + *out = new(bool) + **out = **in + } + if in.AllowIntrospectionFunctions != nil { + in, out := &in.AllowIntrospectionFunctions, &out.AllowIntrospectionFunctions + *out = new(bool) + **out = **in + } + if in.AllowSuspiciousLowCardinalityTypes != nil { + in, out := &in.AllowSuspiciousLowCardinalityTypes, &out.AllowSuspiciousLowCardinalityTypes + *out = new(bool) + **out = **in + } + if in.AnyJoinDistinctRightTableKeys != nil { + in, out := &in.AnyJoinDistinctRightTableKeys, &out.AnyJoinDistinctRightTableKeys + *out = new(bool) + **out = **in + } + if in.AsyncInsert != nil { + in, out := &in.AsyncInsert, &out.AsyncInsert + *out = new(bool) + **out = **in + } + if in.AsyncInsertBusyTimeout != nil { + in, out := &in.AsyncInsertBusyTimeout, &out.AsyncInsertBusyTimeout + *out = new(float64) + **out = **in + } + if in.AsyncInsertMaxDataSize != nil { + in, out := &in.AsyncInsertMaxDataSize, &out.AsyncInsertMaxDataSize + *out = new(float64) + **out = **in + } + if in.AsyncInsertStaleTimeout != nil { + in, out := &in.AsyncInsertStaleTimeout, &out.AsyncInsertStaleTimeout + *out = new(float64) + **out = **in + } + if in.AsyncInsertThreads != nil { + in, out := &in.AsyncInsertThreads, &out.AsyncInsertThreads + *out = new(float64) + **out = **in + } + if in.CancelHTTPReadonlyQueriesOnClientClose != nil { + in, out := &in.CancelHTTPReadonlyQueriesOnClientClose, &out.CancelHTTPReadonlyQueriesOnClientClose + *out = new(bool) + **out = **in + } + if in.Compile != nil { + in, out := &in.Compile, &out.Compile + *out = new(bool) + **out = **in + } + if in.CompileExpressions != nil { + in, out := &in.CompileExpressions, &out.CompileExpressions + *out = new(bool) + **out = **in + } + if in.ConnectTimeout != nil { + in, out := &in.ConnectTimeout, &out.ConnectTimeout + *out = new(float64) + **out = **in + } + if in.ConnectTimeoutWithFailover != nil { + in, out := &in.ConnectTimeoutWithFailover, &out.ConnectTimeoutWithFailover + *out = new(float64) + **out = **in + } + if in.CountDistinctImplementation != nil { + in, out := &in.CountDistinctImplementation, &out.CountDistinctImplementation + *out = new(string) + **out = **in + } + if in.DateTimeInputFormat != nil { + in, out := &in.DateTimeInputFormat, &out.DateTimeInputFormat + *out = new(string) + **out = **in + } + if in.DateTimeOutputFormat != nil { + in, out := &in.DateTimeOutputFormat, &out.DateTimeOutputFormat + *out = new(string) + **out = **in + } + if in.DeduplicateBlocksInDependentMaterializedViews != nil { + in, out := &in.DeduplicateBlocksInDependentMaterializedViews, &out.DeduplicateBlocksInDependentMaterializedViews + *out = new(bool) + **out = **in + } + if in.DistinctOverflowMode != nil { + in, out := &in.DistinctOverflowMode, &out.DistinctOverflowMode + *out = new(string) + **out = **in + } + if in.DistributedAggregationMemoryEfficient != nil { + in, out := &in.DistributedAggregationMemoryEfficient, &out.DistributedAggregationMemoryEfficient + *out = 
new(bool) + **out = **in + } + if in.DistributedDdlTaskTimeout != nil { + in, out := &in.DistributedDdlTaskTimeout, &out.DistributedDdlTaskTimeout + *out = new(float64) + **out = **in + } + if in.DistributedProductMode != nil { + in, out := &in.DistributedProductMode, &out.DistributedProductMode + *out = new(string) + **out = **in + } + if in.EmptyResultForAggregationByEmptySet != nil { + in, out := &in.EmptyResultForAggregationByEmptySet, &out.EmptyResultForAggregationByEmptySet + *out = new(bool) + **out = **in + } + if in.EnableHTTPCompression != nil { + in, out := &in.EnableHTTPCompression, &out.EnableHTTPCompression + *out = new(bool) + **out = **in + } + if in.FallbackToStaleReplicasForDistributedQueries != nil { + in, out := &in.FallbackToStaleReplicasForDistributedQueries, &out.FallbackToStaleReplicasForDistributedQueries + *out = new(bool) + **out = **in + } + if in.FlattenNested != nil { + in, out := &in.FlattenNested, &out.FlattenNested + *out = new(bool) + **out = **in + } + if in.ForceIndexByDate != nil { + in, out := &in.ForceIndexByDate, &out.ForceIndexByDate + *out = new(bool) + **out = **in + } + if in.ForcePrimaryKey != nil { + in, out := &in.ForcePrimaryKey, &out.ForcePrimaryKey + *out = new(bool) + **out = **in + } + if in.FormatRegexp != nil { + in, out := &in.FormatRegexp, &out.FormatRegexp + *out = new(string) + **out = **in + } + if in.FormatRegexpSkipUnmatched != nil { + in, out := &in.FormatRegexpSkipUnmatched, &out.FormatRegexpSkipUnmatched + *out = new(bool) + **out = **in + } + if in.GroupByOverflowMode != nil { + in, out := &in.GroupByOverflowMode, &out.GroupByOverflowMode + *out = new(string) + **out = **in + } + if in.GroupByTwoLevelThreshold != nil { + in, out := &in.GroupByTwoLevelThreshold, &out.GroupByTwoLevelThreshold + *out = new(float64) + **out = **in + } + if in.GroupByTwoLevelThresholdBytes != nil { + in, out := &in.GroupByTwoLevelThresholdBytes, &out.GroupByTwoLevelThresholdBytes + *out = new(float64) + **out = **in + } + if in.HTTPConnectionTimeout != nil { + in, out := &in.HTTPConnectionTimeout, &out.HTTPConnectionTimeout + *out = new(float64) + **out = **in + } + if in.HTTPHeadersProgressInterval != nil { + in, out := &in.HTTPHeadersProgressInterval, &out.HTTPHeadersProgressInterval + *out = new(float64) + **out = **in + } + if in.HTTPReceiveTimeout != nil { + in, out := &in.HTTPReceiveTimeout, &out.HTTPReceiveTimeout + *out = new(float64) + **out = **in + } + if in.HTTPSendTimeout != nil { + in, out := &in.HTTPSendTimeout, &out.HTTPSendTimeout + *out = new(float64) + **out = **in + } + if in.HedgedConnectionTimeoutMs != nil { + in, out := &in.HedgedConnectionTimeoutMs, &out.HedgedConnectionTimeoutMs + *out = new(float64) + **out = **in + } + if in.IdleConnectionTimeout != nil { + in, out := &in.IdleConnectionTimeout, &out.IdleConnectionTimeout + *out = new(float64) + **out = **in + } + if in.InputFormatDefaultsForOmittedFields != nil { + in, out := &in.InputFormatDefaultsForOmittedFields, &out.InputFormatDefaultsForOmittedFields + *out = new(bool) + **out = **in + } + if in.InputFormatImportNestedJSON != nil { + in, out := &in.InputFormatImportNestedJSON, &out.InputFormatImportNestedJSON + *out = new(bool) + **out = **in + } + if in.InputFormatNullAsDefault != nil { + in, out := &in.InputFormatNullAsDefault, &out.InputFormatNullAsDefault + *out = new(bool) + **out = **in + } + if in.InputFormatParallelParsing != nil { + in, out := &in.InputFormatParallelParsing, &out.InputFormatParallelParsing + *out = new(bool) + **out = **in + } + if 
in.InputFormatValuesInterpretExpressions != nil { + in, out := &in.InputFormatValuesInterpretExpressions, &out.InputFormatValuesInterpretExpressions + *out = new(bool) + **out = **in + } + if in.InputFormatWithNamesUseHeader != nil { + in, out := &in.InputFormatWithNamesUseHeader, &out.InputFormatWithNamesUseHeader + *out = new(bool) + **out = **in + } + if in.InsertKeeperMaxRetries != nil { + in, out := &in.InsertKeeperMaxRetries, &out.InsertKeeperMaxRetries + *out = new(float64) + **out = **in + } + if in.InsertNullAsDefault != nil { + in, out := &in.InsertNullAsDefault, &out.InsertNullAsDefault + *out = new(bool) + **out = **in + } + if in.InsertQuorum != nil { + in, out := &in.InsertQuorum, &out.InsertQuorum + *out = new(float64) + **out = **in + } + if in.InsertQuorumParallel != nil { + in, out := &in.InsertQuorumParallel, &out.InsertQuorumParallel + *out = new(bool) + **out = **in + } + if in.InsertQuorumTimeout != nil { + in, out := &in.InsertQuorumTimeout, &out.InsertQuorumTimeout + *out = new(float64) + **out = **in + } + if in.JoinAlgorithm != nil { + in, out := &in.JoinAlgorithm, &out.JoinAlgorithm + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JoinOverflowMode != nil { + in, out := &in.JoinOverflowMode, &out.JoinOverflowMode + *out = new(string) + **out = **in + } + if in.JoinUseNulls != nil { + in, out := &in.JoinUseNulls, &out.JoinUseNulls + *out = new(bool) + **out = **in + } + if in.JoinedSubqueryRequiresAlias != nil { + in, out := &in.JoinedSubqueryRequiresAlias, &out.JoinedSubqueryRequiresAlias + *out = new(bool) + **out = **in + } + if in.LoadBalancing != nil { + in, out := &in.LoadBalancing, &out.LoadBalancing + *out = new(string) + **out = **in + } + if in.LocalFilesystemReadMethod != nil { + in, out := &in.LocalFilesystemReadMethod, &out.LocalFilesystemReadMethod + *out = new(string) + **out = **in + } + if in.LogQueryThreads != nil { + in, out := &in.LogQueryThreads, &out.LogQueryThreads + *out = new(bool) + **out = **in + } + if in.LowCardinalityAllowInNativeFormat != nil { + in, out := &in.LowCardinalityAllowInNativeFormat, &out.LowCardinalityAllowInNativeFormat + *out = new(bool) + **out = **in + } + if in.MaxAstDepth != nil { + in, out := &in.MaxAstDepth, &out.MaxAstDepth + *out = new(float64) + **out = **in + } + if in.MaxAstElements != nil { + in, out := &in.MaxAstElements, &out.MaxAstElements + *out = new(float64) + **out = **in + } + if in.MaxBlockSize != nil { + in, out := &in.MaxBlockSize, &out.MaxBlockSize + *out = new(float64) + **out = **in + } + if in.MaxBytesBeforeExternalGroupBy != nil { + in, out := &in.MaxBytesBeforeExternalGroupBy, &out.MaxBytesBeforeExternalGroupBy + *out = new(float64) + **out = **in + } + if in.MaxBytesBeforeExternalSort != nil { + in, out := &in.MaxBytesBeforeExternalSort, &out.MaxBytesBeforeExternalSort + *out = new(float64) + **out = **in + } + if in.MaxBytesInDistinct != nil { + in, out := &in.MaxBytesInDistinct, &out.MaxBytesInDistinct + *out = new(float64) + **out = **in + } + if in.MaxBytesInJoin != nil { + in, out := &in.MaxBytesInJoin, &out.MaxBytesInJoin + *out = new(float64) + **out = **in + } + if in.MaxBytesInSet != nil { + in, out := &in.MaxBytesInSet, &out.MaxBytesInSet + *out = new(float64) + **out = **in + } + if in.MaxBytesToRead != nil { + in, out := &in.MaxBytesToRead, &out.MaxBytesToRead + *out = new(float64) + **out = **in + } + if in.MaxBytesToSort != nil { + in, out := 
&in.MaxBytesToSort, &out.MaxBytesToSort + *out = new(float64) + **out = **in + } + if in.MaxBytesToTransfer != nil { + in, out := &in.MaxBytesToTransfer, &out.MaxBytesToTransfer + *out = new(float64) + **out = **in + } + if in.MaxColumnsToRead != nil { + in, out := &in.MaxColumnsToRead, &out.MaxColumnsToRead + *out = new(float64) + **out = **in + } + if in.MaxConcurrentQueriesForUser != nil { + in, out := &in.MaxConcurrentQueriesForUser, &out.MaxConcurrentQueriesForUser + *out = new(float64) + **out = **in + } + if in.MaxExecutionTime != nil { + in, out := &in.MaxExecutionTime, &out.MaxExecutionTime + *out = new(float64) + **out = **in + } + if in.MaxExpandedAstElements != nil { + in, out := &in.MaxExpandedAstElements, &out.MaxExpandedAstElements + *out = new(float64) + **out = **in + } + if in.MaxFinalThreads != nil { + in, out := &in.MaxFinalThreads, &out.MaxFinalThreads + *out = new(float64) + **out = **in + } + if in.MaxHTTPGetRedirects != nil { + in, out := &in.MaxHTTPGetRedirects, &out.MaxHTTPGetRedirects + *out = new(float64) + **out = **in + } + if in.MaxInsertBlockSize != nil { + in, out := &in.MaxInsertBlockSize, &out.MaxInsertBlockSize + *out = new(float64) + **out = **in + } + if in.MaxInsertThreads != nil { + in, out := &in.MaxInsertThreads, &out.MaxInsertThreads + *out = new(float64) + **out = **in + } + if in.MaxMemoryUsage != nil { + in, out := &in.MaxMemoryUsage, &out.MaxMemoryUsage + *out = new(float64) + **out = **in + } + if in.MaxMemoryUsageForUser != nil { + in, out := &in.MaxMemoryUsageForUser, &out.MaxMemoryUsageForUser + *out = new(float64) + **out = **in + } + if in.MaxNetworkBandwidth != nil { + in, out := &in.MaxNetworkBandwidth, &out.MaxNetworkBandwidth + *out = new(float64) + **out = **in + } + if in.MaxNetworkBandwidthForUser != nil { + in, out := &in.MaxNetworkBandwidthForUser, &out.MaxNetworkBandwidthForUser + *out = new(float64) + **out = **in + } + if in.MaxParserDepth != nil { + in, out := &in.MaxParserDepth, &out.MaxParserDepth + *out = new(float64) + **out = **in + } + if in.MaxQuerySize != nil { + in, out := &in.MaxQuerySize, &out.MaxQuerySize + *out = new(float64) + **out = **in + } + if in.MaxReadBufferSize != nil { + in, out := &in.MaxReadBufferSize, &out.MaxReadBufferSize + *out = new(float64) + **out = **in + } + if in.MaxReplicaDelayForDistributedQueries != nil { + in, out := &in.MaxReplicaDelayForDistributedQueries, &out.MaxReplicaDelayForDistributedQueries + *out = new(float64) + **out = **in + } + if in.MaxResultBytes != nil { + in, out := &in.MaxResultBytes, &out.MaxResultBytes + *out = new(float64) + **out = **in + } + if in.MaxResultRows != nil { + in, out := &in.MaxResultRows, &out.MaxResultRows + *out = new(float64) + **out = **in + } + if in.MaxRowsInDistinct != nil { + in, out := &in.MaxRowsInDistinct, &out.MaxRowsInDistinct + *out = new(float64) + **out = **in + } + if in.MaxRowsInJoin != nil { + in, out := &in.MaxRowsInJoin, &out.MaxRowsInJoin + *out = new(float64) + **out = **in + } + if in.MaxRowsInSet != nil { + in, out := &in.MaxRowsInSet, &out.MaxRowsInSet + *out = new(float64) + **out = **in + } + if in.MaxRowsToGroupBy != nil { + in, out := &in.MaxRowsToGroupBy, &out.MaxRowsToGroupBy + *out = new(float64) + **out = **in + } + if in.MaxRowsToRead != nil { + in, out := &in.MaxRowsToRead, &out.MaxRowsToRead + *out = new(float64) + **out = **in + } + if in.MaxRowsToSort != nil { + in, out := &in.MaxRowsToSort, &out.MaxRowsToSort + *out = new(float64) + **out = **in + } + if in.MaxRowsToTransfer != nil { + in, out := 
&in.MaxRowsToTransfer, &out.MaxRowsToTransfer + *out = new(float64) + **out = **in + } + if in.MaxTemporaryColumns != nil { + in, out := &in.MaxTemporaryColumns, &out.MaxTemporaryColumns + *out = new(float64) + **out = **in + } + if in.MaxTemporaryDataOnDiskSizeForQuery != nil { + in, out := &in.MaxTemporaryDataOnDiskSizeForQuery, &out.MaxTemporaryDataOnDiskSizeForQuery + *out = new(float64) + **out = **in + } + if in.MaxTemporaryDataOnDiskSizeForUser != nil { + in, out := &in.MaxTemporaryDataOnDiskSizeForUser, &out.MaxTemporaryDataOnDiskSizeForUser + *out = new(float64) + **out = **in + } + if in.MaxTemporaryNonConstColumns != nil { + in, out := &in.MaxTemporaryNonConstColumns, &out.MaxTemporaryNonConstColumns + *out = new(float64) + **out = **in + } + if in.MaxThreads != nil { + in, out := &in.MaxThreads, &out.MaxThreads + *out = new(float64) + **out = **in + } + if in.MemoryOvercommitRatioDenominator != nil { + in, out := &in.MemoryOvercommitRatioDenominator, &out.MemoryOvercommitRatioDenominator + *out = new(float64) + **out = **in + } + if in.MemoryOvercommitRatioDenominatorForUser != nil { + in, out := &in.MemoryOvercommitRatioDenominatorForUser, &out.MemoryOvercommitRatioDenominatorForUser + *out = new(float64) + **out = **in + } + if in.MemoryProfilerSampleProbability != nil { + in, out := &in.MemoryProfilerSampleProbability, &out.MemoryProfilerSampleProbability + *out = new(float64) + **out = **in + } + if in.MemoryProfilerStep != nil { + in, out := &in.MemoryProfilerStep, &out.MemoryProfilerStep + *out = new(float64) + **out = **in + } + if in.MemoryUsageOvercommitMaxWaitMicroseconds != nil { + in, out := &in.MemoryUsageOvercommitMaxWaitMicroseconds, &out.MemoryUsageOvercommitMaxWaitMicroseconds + *out = new(float64) + **out = **in + } + if in.MergeTreeMaxBytesToUseCache != nil { + in, out := &in.MergeTreeMaxBytesToUseCache, &out.MergeTreeMaxBytesToUseCache + *out = new(float64) + **out = **in + } + if in.MergeTreeMaxRowsToUseCache != nil { + in, out := &in.MergeTreeMaxRowsToUseCache, &out.MergeTreeMaxRowsToUseCache + *out = new(float64) + **out = **in + } + if in.MergeTreeMinBytesForConcurrentRead != nil { + in, out := &in.MergeTreeMinBytesForConcurrentRead, &out.MergeTreeMinBytesForConcurrentRead + *out = new(float64) + **out = **in + } + if in.MergeTreeMinRowsForConcurrentRead != nil { + in, out := &in.MergeTreeMinRowsForConcurrentRead, &out.MergeTreeMinRowsForConcurrentRead + *out = new(float64) + **out = **in + } + if in.MinBytesToUseDirectIo != nil { + in, out := &in.MinBytesToUseDirectIo, &out.MinBytesToUseDirectIo + *out = new(float64) + **out = **in + } + if in.MinCountToCompile != nil { + in, out := &in.MinCountToCompile, &out.MinCountToCompile + *out = new(float64) + **out = **in + } + if in.MinCountToCompileExpression != nil { + in, out := &in.MinCountToCompileExpression, &out.MinCountToCompileExpression + *out = new(float64) + **out = **in + } + if in.MinExecutionSpeed != nil { + in, out := &in.MinExecutionSpeed, &out.MinExecutionSpeed + *out = new(float64) + **out = **in + } + if in.MinExecutionSpeedBytes != nil { + in, out := &in.MinExecutionSpeedBytes, &out.MinExecutionSpeedBytes + *out = new(float64) + **out = **in + } + if in.MinInsertBlockSizeBytes != nil { + in, out := &in.MinInsertBlockSizeBytes, &out.MinInsertBlockSizeBytes + *out = new(float64) + **out = **in + } + if in.MinInsertBlockSizeRows != nil { + in, out := &in.MinInsertBlockSizeRows, &out.MinInsertBlockSizeRows + *out = new(float64) + **out = **in + } + if in.OutputFormatJSONQuote64BitIntegers != 
nil { + in, out := &in.OutputFormatJSONQuote64BitIntegers, &out.OutputFormatJSONQuote64BitIntegers + *out = new(bool) + **out = **in + } + if in.OutputFormatJSONQuoteDenormals != nil { + in, out := &in.OutputFormatJSONQuoteDenormals, &out.OutputFormatJSONQuoteDenormals + *out = new(bool) + **out = **in + } + if in.PreferLocalhostReplica != nil { + in, out := &in.PreferLocalhostReplica, &out.PreferLocalhostReplica + *out = new(bool) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.QuotaMode != nil { + in, out := &in.QuotaMode, &out.QuotaMode + *out = new(string) + **out = **in + } + if in.ReadOverflowMode != nil { + in, out := &in.ReadOverflowMode, &out.ReadOverflowMode + *out = new(string) + **out = **in + } + if in.Readonly != nil { + in, out := &in.Readonly, &out.Readonly + *out = new(float64) + **out = **in + } + if in.ReceiveTimeout != nil { + in, out := &in.ReceiveTimeout, &out.ReceiveTimeout + *out = new(float64) + **out = **in + } + if in.RemoteFilesystemReadMethod != nil { + in, out := &in.RemoteFilesystemReadMethod, &out.RemoteFilesystemReadMethod + *out = new(string) + **out = **in + } + if in.ReplicationAlterPartitionsSync != nil { + in, out := &in.ReplicationAlterPartitionsSync, &out.ReplicationAlterPartitionsSync + *out = new(float64) + **out = **in + } + if in.ResultOverflowMode != nil { + in, out := &in.ResultOverflowMode, &out.ResultOverflowMode + *out = new(string) + **out = **in + } + if in.SelectSequentialConsistency != nil { + in, out := &in.SelectSequentialConsistency, &out.SelectSequentialConsistency + *out = new(bool) + **out = **in + } + if in.SendProgressInHTTPHeaders != nil { + in, out := &in.SendProgressInHTTPHeaders, &out.SendProgressInHTTPHeaders + *out = new(bool) + **out = **in + } + if in.SendTimeout != nil { + in, out := &in.SendTimeout, &out.SendTimeout + *out = new(float64) + **out = **in + } + if in.SetOverflowMode != nil { + in, out := &in.SetOverflowMode, &out.SetOverflowMode + *out = new(string) + **out = **in + } + if in.SkipUnavailableShards != nil { + in, out := &in.SkipUnavailableShards, &out.SkipUnavailableShards + *out = new(bool) + **out = **in + } + if in.SortOverflowMode != nil { + in, out := &in.SortOverflowMode, &out.SortOverflowMode + *out = new(string) + **out = **in + } + if in.TimeoutBeforeCheckingExecutionSpeed != nil { + in, out := &in.TimeoutBeforeCheckingExecutionSpeed, &out.TimeoutBeforeCheckingExecutionSpeed + *out = new(float64) + **out = **in + } + if in.TimeoutOverflowMode != nil { + in, out := &in.TimeoutOverflowMode, &out.TimeoutOverflowMode + *out = new(string) + **out = **in + } + if in.TransferOverflowMode != nil { + in, out := &in.TransferOverflowMode, &out.TransferOverflowMode + *out = new(string) + **out = **in + } + if in.TransformNullIn != nil { + in, out := &in.TransformNullIn, &out.TransformNullIn + *out = new(bool) + **out = **in + } + if in.UseHedgedRequests != nil { + in, out := &in.UseHedgedRequests, &out.UseHedgedRequests + *out = new(bool) + **out = **in + } + if in.UseUncompressedCache != nil { + in, out := &in.UseUncompressedCache, &out.UseUncompressedCache + *out = new(bool) + **out = **in + } + if in.WaitForAsyncInsert != nil { + in, out := &in.WaitForAsyncInsert, &out.WaitForAsyncInsert + *out = new(bool) + **out = **in + } + if in.WaitForAsyncInsertTimeout != nil { + in, out := &in.WaitForAsyncInsertTimeout, &out.WaitForAsyncInsertTimeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new UserSettingsObservation. +func (in *UserSettingsObservation) DeepCopy() *UserSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsParameters) DeepCopyInto(out *UserSettingsParameters) { + *out = *in + if in.AddHTTPCorsHeader != nil { + in, out := &in.AddHTTPCorsHeader, &out.AddHTTPCorsHeader + *out = new(bool) + **out = **in + } + if in.AllowDdl != nil { + in, out := &in.AllowDdl, &out.AllowDdl + *out = new(bool) + **out = **in + } + if in.AllowIntrospectionFunctions != nil { + in, out := &in.AllowIntrospectionFunctions, &out.AllowIntrospectionFunctions + *out = new(bool) + **out = **in + } + if in.AllowSuspiciousLowCardinalityTypes != nil { + in, out := &in.AllowSuspiciousLowCardinalityTypes, &out.AllowSuspiciousLowCardinalityTypes + *out = new(bool) + **out = **in + } + if in.AnyJoinDistinctRightTableKeys != nil { + in, out := &in.AnyJoinDistinctRightTableKeys, &out.AnyJoinDistinctRightTableKeys + *out = new(bool) + **out = **in + } + if in.AsyncInsert != nil { + in, out := &in.AsyncInsert, &out.AsyncInsert + *out = new(bool) + **out = **in + } + if in.AsyncInsertBusyTimeout != nil { + in, out := &in.AsyncInsertBusyTimeout, &out.AsyncInsertBusyTimeout + *out = new(float64) + **out = **in + } + if in.AsyncInsertMaxDataSize != nil { + in, out := &in.AsyncInsertMaxDataSize, &out.AsyncInsertMaxDataSize + *out = new(float64) + **out = **in + } + if in.AsyncInsertStaleTimeout != nil { + in, out := &in.AsyncInsertStaleTimeout, &out.AsyncInsertStaleTimeout + *out = new(float64) + **out = **in + } + if in.AsyncInsertThreads != nil { + in, out := &in.AsyncInsertThreads, &out.AsyncInsertThreads + *out = new(float64) + **out = **in + } + if in.CancelHTTPReadonlyQueriesOnClientClose != nil { + in, out := &in.CancelHTTPReadonlyQueriesOnClientClose, &out.CancelHTTPReadonlyQueriesOnClientClose + *out = new(bool) + **out = **in + } + if in.Compile != nil { + in, out := &in.Compile, &out.Compile + *out = new(bool) + **out = **in + } + if in.CompileExpressions != nil { + in, out := &in.CompileExpressions, &out.CompileExpressions + *out = new(bool) + **out = **in + } + if in.ConnectTimeout != nil { + in, out := &in.ConnectTimeout, &out.ConnectTimeout + *out = new(float64) + **out = **in + } + if in.ConnectTimeoutWithFailover != nil { + in, out := &in.ConnectTimeoutWithFailover, &out.ConnectTimeoutWithFailover + *out = new(float64) + **out = **in + } + if in.CountDistinctImplementation != nil { + in, out := &in.CountDistinctImplementation, &out.CountDistinctImplementation + *out = new(string) + **out = **in + } + if in.DateTimeInputFormat != nil { + in, out := &in.DateTimeInputFormat, &out.DateTimeInputFormat + *out = new(string) + **out = **in + } + if in.DateTimeOutputFormat != nil { + in, out := &in.DateTimeOutputFormat, &out.DateTimeOutputFormat + *out = new(string) + **out = **in + } + if in.DeduplicateBlocksInDependentMaterializedViews != nil { + in, out := &in.DeduplicateBlocksInDependentMaterializedViews, &out.DeduplicateBlocksInDependentMaterializedViews + *out = new(bool) + **out = **in + } + if in.DistinctOverflowMode != nil { + in, out := &in.DistinctOverflowMode, &out.DistinctOverflowMode + *out = new(string) + **out = **in + } + if in.DistributedAggregationMemoryEfficient != nil { + in, out := 
&in.DistributedAggregationMemoryEfficient, &out.DistributedAggregationMemoryEfficient + *out = new(bool) + **out = **in + } + if in.DistributedDdlTaskTimeout != nil { + in, out := &in.DistributedDdlTaskTimeout, &out.DistributedDdlTaskTimeout + *out = new(float64) + **out = **in + } + if in.DistributedProductMode != nil { + in, out := &in.DistributedProductMode, &out.DistributedProductMode + *out = new(string) + **out = **in + } + if in.EmptyResultForAggregationByEmptySet != nil { + in, out := &in.EmptyResultForAggregationByEmptySet, &out.EmptyResultForAggregationByEmptySet + *out = new(bool) + **out = **in + } + if in.EnableHTTPCompression != nil { + in, out := &in.EnableHTTPCompression, &out.EnableHTTPCompression + *out = new(bool) + **out = **in + } + if in.FallbackToStaleReplicasForDistributedQueries != nil { + in, out := &in.FallbackToStaleReplicasForDistributedQueries, &out.FallbackToStaleReplicasForDistributedQueries + *out = new(bool) + **out = **in + } + if in.FlattenNested != nil { + in, out := &in.FlattenNested, &out.FlattenNested + *out = new(bool) + **out = **in + } + if in.ForceIndexByDate != nil { + in, out := &in.ForceIndexByDate, &out.ForceIndexByDate + *out = new(bool) + **out = **in + } + if in.ForcePrimaryKey != nil { + in, out := &in.ForcePrimaryKey, &out.ForcePrimaryKey + *out = new(bool) + **out = **in + } + if in.FormatRegexp != nil { + in, out := &in.FormatRegexp, &out.FormatRegexp + *out = new(string) + **out = **in + } + if in.FormatRegexpSkipUnmatched != nil { + in, out := &in.FormatRegexpSkipUnmatched, &out.FormatRegexpSkipUnmatched + *out = new(bool) + **out = **in + } + if in.GroupByOverflowMode != nil { + in, out := &in.GroupByOverflowMode, &out.GroupByOverflowMode + *out = new(string) + **out = **in + } + if in.GroupByTwoLevelThreshold != nil { + in, out := &in.GroupByTwoLevelThreshold, &out.GroupByTwoLevelThreshold + *out = new(float64) + **out = **in + } + if in.GroupByTwoLevelThresholdBytes != nil { + in, out := &in.GroupByTwoLevelThresholdBytes, &out.GroupByTwoLevelThresholdBytes + *out = new(float64) + **out = **in + } + if in.HTTPConnectionTimeout != nil { + in, out := &in.HTTPConnectionTimeout, &out.HTTPConnectionTimeout + *out = new(float64) + **out = **in + } + if in.HTTPHeadersProgressInterval != nil { + in, out := &in.HTTPHeadersProgressInterval, &out.HTTPHeadersProgressInterval + *out = new(float64) + **out = **in + } + if in.HTTPReceiveTimeout != nil { + in, out := &in.HTTPReceiveTimeout, &out.HTTPReceiveTimeout + *out = new(float64) + **out = **in + } + if in.HTTPSendTimeout != nil { + in, out := &in.HTTPSendTimeout, &out.HTTPSendTimeout + *out = new(float64) + **out = **in + } + if in.HedgedConnectionTimeoutMs != nil { + in, out := &in.HedgedConnectionTimeoutMs, &out.HedgedConnectionTimeoutMs + *out = new(float64) + **out = **in + } + if in.IdleConnectionTimeout != nil { + in, out := &in.IdleConnectionTimeout, &out.IdleConnectionTimeout + *out = new(float64) + **out = **in + } + if in.InputFormatDefaultsForOmittedFields != nil { + in, out := &in.InputFormatDefaultsForOmittedFields, &out.InputFormatDefaultsForOmittedFields + *out = new(bool) + **out = **in + } + if in.InputFormatImportNestedJSON != nil { + in, out := &in.InputFormatImportNestedJSON, &out.InputFormatImportNestedJSON + *out = new(bool) + **out = **in + } + if in.InputFormatNullAsDefault != nil { + in, out := &in.InputFormatNullAsDefault, &out.InputFormatNullAsDefault + *out = new(bool) + **out = **in + } + if in.InputFormatParallelParsing != nil { + in, out := 
&in.InputFormatParallelParsing, &out.InputFormatParallelParsing + *out = new(bool) + **out = **in + } + if in.InputFormatValuesInterpretExpressions != nil { + in, out := &in.InputFormatValuesInterpretExpressions, &out.InputFormatValuesInterpretExpressions + *out = new(bool) + **out = **in + } + if in.InputFormatWithNamesUseHeader != nil { + in, out := &in.InputFormatWithNamesUseHeader, &out.InputFormatWithNamesUseHeader + *out = new(bool) + **out = **in + } + if in.InsertKeeperMaxRetries != nil { + in, out := &in.InsertKeeperMaxRetries, &out.InsertKeeperMaxRetries + *out = new(float64) + **out = **in + } + if in.InsertNullAsDefault != nil { + in, out := &in.InsertNullAsDefault, &out.InsertNullAsDefault + *out = new(bool) + **out = **in + } + if in.InsertQuorum != nil { + in, out := &in.InsertQuorum, &out.InsertQuorum + *out = new(float64) + **out = **in + } + if in.InsertQuorumParallel != nil { + in, out := &in.InsertQuorumParallel, &out.InsertQuorumParallel + *out = new(bool) + **out = **in + } + if in.InsertQuorumTimeout != nil { + in, out := &in.InsertQuorumTimeout, &out.InsertQuorumTimeout + *out = new(float64) + **out = **in + } + if in.JoinAlgorithm != nil { + in, out := &in.JoinAlgorithm, &out.JoinAlgorithm + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JoinOverflowMode != nil { + in, out := &in.JoinOverflowMode, &out.JoinOverflowMode + *out = new(string) + **out = **in + } + if in.JoinUseNulls != nil { + in, out := &in.JoinUseNulls, &out.JoinUseNulls + *out = new(bool) + **out = **in + } + if in.JoinedSubqueryRequiresAlias != nil { + in, out := &in.JoinedSubqueryRequiresAlias, &out.JoinedSubqueryRequiresAlias + *out = new(bool) + **out = **in + } + if in.LoadBalancing != nil { + in, out := &in.LoadBalancing, &out.LoadBalancing + *out = new(string) + **out = **in + } + if in.LocalFilesystemReadMethod != nil { + in, out := &in.LocalFilesystemReadMethod, &out.LocalFilesystemReadMethod + *out = new(string) + **out = **in + } + if in.LogQueryThreads != nil { + in, out := &in.LogQueryThreads, &out.LogQueryThreads + *out = new(bool) + **out = **in + } + if in.LowCardinalityAllowInNativeFormat != nil { + in, out := &in.LowCardinalityAllowInNativeFormat, &out.LowCardinalityAllowInNativeFormat + *out = new(bool) + **out = **in + } + if in.MaxAstDepth != nil { + in, out := &in.MaxAstDepth, &out.MaxAstDepth + *out = new(float64) + **out = **in + } + if in.MaxAstElements != nil { + in, out := &in.MaxAstElements, &out.MaxAstElements + *out = new(float64) + **out = **in + } + if in.MaxBlockSize != nil { + in, out := &in.MaxBlockSize, &out.MaxBlockSize + *out = new(float64) + **out = **in + } + if in.MaxBytesBeforeExternalGroupBy != nil { + in, out := &in.MaxBytesBeforeExternalGroupBy, &out.MaxBytesBeforeExternalGroupBy + *out = new(float64) + **out = **in + } + if in.MaxBytesBeforeExternalSort != nil { + in, out := &in.MaxBytesBeforeExternalSort, &out.MaxBytesBeforeExternalSort + *out = new(float64) + **out = **in + } + if in.MaxBytesInDistinct != nil { + in, out := &in.MaxBytesInDistinct, &out.MaxBytesInDistinct + *out = new(float64) + **out = **in + } + if in.MaxBytesInJoin != nil { + in, out := &in.MaxBytesInJoin, &out.MaxBytesInJoin + *out = new(float64) + **out = **in + } + if in.MaxBytesInSet != nil { + in, out := &in.MaxBytesInSet, &out.MaxBytesInSet + *out = new(float64) + **out = **in + } + if in.MaxBytesToRead != nil { + in, out := &in.MaxBytesToRead, 
&out.MaxBytesToRead + *out = new(float64) + **out = **in + } + if in.MaxBytesToSort != nil { + in, out := &in.MaxBytesToSort, &out.MaxBytesToSort + *out = new(float64) + **out = **in + } + if in.MaxBytesToTransfer != nil { + in, out := &in.MaxBytesToTransfer, &out.MaxBytesToTransfer + *out = new(float64) + **out = **in + } + if in.MaxColumnsToRead != nil { + in, out := &in.MaxColumnsToRead, &out.MaxColumnsToRead + *out = new(float64) + **out = **in + } + if in.MaxConcurrentQueriesForUser != nil { + in, out := &in.MaxConcurrentQueriesForUser, &out.MaxConcurrentQueriesForUser + *out = new(float64) + **out = **in + } + if in.MaxExecutionTime != nil { + in, out := &in.MaxExecutionTime, &out.MaxExecutionTime + *out = new(float64) + **out = **in + } + if in.MaxExpandedAstElements != nil { + in, out := &in.MaxExpandedAstElements, &out.MaxExpandedAstElements + *out = new(float64) + **out = **in + } + if in.MaxFinalThreads != nil { + in, out := &in.MaxFinalThreads, &out.MaxFinalThreads + *out = new(float64) + **out = **in + } + if in.MaxHTTPGetRedirects != nil { + in, out := &in.MaxHTTPGetRedirects, &out.MaxHTTPGetRedirects + *out = new(float64) + **out = **in + } + if in.MaxInsertBlockSize != nil { + in, out := &in.MaxInsertBlockSize, &out.MaxInsertBlockSize + *out = new(float64) + **out = **in + } + if in.MaxInsertThreads != nil { + in, out := &in.MaxInsertThreads, &out.MaxInsertThreads + *out = new(float64) + **out = **in + } + if in.MaxMemoryUsage != nil { + in, out := &in.MaxMemoryUsage, &out.MaxMemoryUsage + *out = new(float64) + **out = **in + } + if in.MaxMemoryUsageForUser != nil { + in, out := &in.MaxMemoryUsageForUser, &out.MaxMemoryUsageForUser + *out = new(float64) + **out = **in + } + if in.MaxNetworkBandwidth != nil { + in, out := &in.MaxNetworkBandwidth, &out.MaxNetworkBandwidth + *out = new(float64) + **out = **in + } + if in.MaxNetworkBandwidthForUser != nil { + in, out := &in.MaxNetworkBandwidthForUser, &out.MaxNetworkBandwidthForUser + *out = new(float64) + **out = **in + } + if in.MaxParserDepth != nil { + in, out := &in.MaxParserDepth, &out.MaxParserDepth + *out = new(float64) + **out = **in + } + if in.MaxQuerySize != nil { + in, out := &in.MaxQuerySize, &out.MaxQuerySize + *out = new(float64) + **out = **in + } + if in.MaxReadBufferSize != nil { + in, out := &in.MaxReadBufferSize, &out.MaxReadBufferSize + *out = new(float64) + **out = **in + } + if in.MaxReplicaDelayForDistributedQueries != nil { + in, out := &in.MaxReplicaDelayForDistributedQueries, &out.MaxReplicaDelayForDistributedQueries + *out = new(float64) + **out = **in + } + if in.MaxResultBytes != nil { + in, out := &in.MaxResultBytes, &out.MaxResultBytes + *out = new(float64) + **out = **in + } + if in.MaxResultRows != nil { + in, out := &in.MaxResultRows, &out.MaxResultRows + *out = new(float64) + **out = **in + } + if in.MaxRowsInDistinct != nil { + in, out := &in.MaxRowsInDistinct, &out.MaxRowsInDistinct + *out = new(float64) + **out = **in + } + if in.MaxRowsInJoin != nil { + in, out := &in.MaxRowsInJoin, &out.MaxRowsInJoin + *out = new(float64) + **out = **in + } + if in.MaxRowsInSet != nil { + in, out := &in.MaxRowsInSet, &out.MaxRowsInSet + *out = new(float64) + **out = **in + } + if in.MaxRowsToGroupBy != nil { + in, out := &in.MaxRowsToGroupBy, &out.MaxRowsToGroupBy + *out = new(float64) + **out = **in + } + if in.MaxRowsToRead != nil { + in, out := &in.MaxRowsToRead, &out.MaxRowsToRead + *out = new(float64) + **out = **in + } + if in.MaxRowsToSort != nil { + in, out := &in.MaxRowsToSort, 
&out.MaxRowsToSort + *out = new(float64) + **out = **in + } + if in.MaxRowsToTransfer != nil { + in, out := &in.MaxRowsToTransfer, &out.MaxRowsToTransfer + *out = new(float64) + **out = **in + } + if in.MaxTemporaryColumns != nil { + in, out := &in.MaxTemporaryColumns, &out.MaxTemporaryColumns + *out = new(float64) + **out = **in + } + if in.MaxTemporaryDataOnDiskSizeForQuery != nil { + in, out := &in.MaxTemporaryDataOnDiskSizeForQuery, &out.MaxTemporaryDataOnDiskSizeForQuery + *out = new(float64) + **out = **in + } + if in.MaxTemporaryDataOnDiskSizeForUser != nil { + in, out := &in.MaxTemporaryDataOnDiskSizeForUser, &out.MaxTemporaryDataOnDiskSizeForUser + *out = new(float64) + **out = **in + } + if in.MaxTemporaryNonConstColumns != nil { + in, out := &in.MaxTemporaryNonConstColumns, &out.MaxTemporaryNonConstColumns + *out = new(float64) + **out = **in + } + if in.MaxThreads != nil { + in, out := &in.MaxThreads, &out.MaxThreads + *out = new(float64) + **out = **in + } + if in.MemoryOvercommitRatioDenominator != nil { + in, out := &in.MemoryOvercommitRatioDenominator, &out.MemoryOvercommitRatioDenominator + *out = new(float64) + **out = **in + } + if in.MemoryOvercommitRatioDenominatorForUser != nil { + in, out := &in.MemoryOvercommitRatioDenominatorForUser, &out.MemoryOvercommitRatioDenominatorForUser + *out = new(float64) + **out = **in + } + if in.MemoryProfilerSampleProbability != nil { + in, out := &in.MemoryProfilerSampleProbability, &out.MemoryProfilerSampleProbability + *out = new(float64) + **out = **in + } + if in.MemoryProfilerStep != nil { + in, out := &in.MemoryProfilerStep, &out.MemoryProfilerStep + *out = new(float64) + **out = **in + } + if in.MemoryUsageOvercommitMaxWaitMicroseconds != nil { + in, out := &in.MemoryUsageOvercommitMaxWaitMicroseconds, &out.MemoryUsageOvercommitMaxWaitMicroseconds + *out = new(float64) + **out = **in + } + if in.MergeTreeMaxBytesToUseCache != nil { + in, out := &in.MergeTreeMaxBytesToUseCache, &out.MergeTreeMaxBytesToUseCache + *out = new(float64) + **out = **in + } + if in.MergeTreeMaxRowsToUseCache != nil { + in, out := &in.MergeTreeMaxRowsToUseCache, &out.MergeTreeMaxRowsToUseCache + *out = new(float64) + **out = **in + } + if in.MergeTreeMinBytesForConcurrentRead != nil { + in, out := &in.MergeTreeMinBytesForConcurrentRead, &out.MergeTreeMinBytesForConcurrentRead + *out = new(float64) + **out = **in + } + if in.MergeTreeMinRowsForConcurrentRead != nil { + in, out := &in.MergeTreeMinRowsForConcurrentRead, &out.MergeTreeMinRowsForConcurrentRead + *out = new(float64) + **out = **in + } + if in.MinBytesToUseDirectIo != nil { + in, out := &in.MinBytesToUseDirectIo, &out.MinBytesToUseDirectIo + *out = new(float64) + **out = **in + } + if in.MinCountToCompile != nil { + in, out := &in.MinCountToCompile, &out.MinCountToCompile + *out = new(float64) + **out = **in + } + if in.MinCountToCompileExpression != nil { + in, out := &in.MinCountToCompileExpression, &out.MinCountToCompileExpression + *out = new(float64) + **out = **in + } + if in.MinExecutionSpeed != nil { + in, out := &in.MinExecutionSpeed, &out.MinExecutionSpeed + *out = new(float64) + **out = **in + } + if in.MinExecutionSpeedBytes != nil { + in, out := &in.MinExecutionSpeedBytes, &out.MinExecutionSpeedBytes + *out = new(float64) + **out = **in + } + if in.MinInsertBlockSizeBytes != nil { + in, out := &in.MinInsertBlockSizeBytes, &out.MinInsertBlockSizeBytes + *out = new(float64) + **out = **in + } + if in.MinInsertBlockSizeRows != nil { + in, out := &in.MinInsertBlockSizeRows, 
&out.MinInsertBlockSizeRows + *out = new(float64) + **out = **in + } + if in.OutputFormatJSONQuote64BitIntegers != nil { + in, out := &in.OutputFormatJSONQuote64BitIntegers, &out.OutputFormatJSONQuote64BitIntegers + *out = new(bool) + **out = **in + } + if in.OutputFormatJSONQuoteDenormals != nil { + in, out := &in.OutputFormatJSONQuoteDenormals, &out.OutputFormatJSONQuoteDenormals + *out = new(bool) + **out = **in + } + if in.PreferLocalhostReplica != nil { + in, out := &in.PreferLocalhostReplica, &out.PreferLocalhostReplica + *out = new(bool) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.QuotaMode != nil { + in, out := &in.QuotaMode, &out.QuotaMode + *out = new(string) + **out = **in + } + if in.ReadOverflowMode != nil { + in, out := &in.ReadOverflowMode, &out.ReadOverflowMode + *out = new(string) + **out = **in + } + if in.Readonly != nil { + in, out := &in.Readonly, &out.Readonly + *out = new(float64) + **out = **in + } + if in.ReceiveTimeout != nil { + in, out := &in.ReceiveTimeout, &out.ReceiveTimeout + *out = new(float64) + **out = **in + } + if in.RemoteFilesystemReadMethod != nil { + in, out := &in.RemoteFilesystemReadMethod, &out.RemoteFilesystemReadMethod + *out = new(string) + **out = **in + } + if in.ReplicationAlterPartitionsSync != nil { + in, out := &in.ReplicationAlterPartitionsSync, &out.ReplicationAlterPartitionsSync + *out = new(float64) + **out = **in + } + if in.ResultOverflowMode != nil { + in, out := &in.ResultOverflowMode, &out.ResultOverflowMode + *out = new(string) + **out = **in + } + if in.SelectSequentialConsistency != nil { + in, out := &in.SelectSequentialConsistency, &out.SelectSequentialConsistency + *out = new(bool) + **out = **in + } + if in.SendProgressInHTTPHeaders != nil { + in, out := &in.SendProgressInHTTPHeaders, &out.SendProgressInHTTPHeaders + *out = new(bool) + **out = **in + } + if in.SendTimeout != nil { + in, out := &in.SendTimeout, &out.SendTimeout + *out = new(float64) + **out = **in + } + if in.SetOverflowMode != nil { + in, out := &in.SetOverflowMode, &out.SetOverflowMode + *out = new(string) + **out = **in + } + if in.SkipUnavailableShards != nil { + in, out := &in.SkipUnavailableShards, &out.SkipUnavailableShards + *out = new(bool) + **out = **in + } + if in.SortOverflowMode != nil { + in, out := &in.SortOverflowMode, &out.SortOverflowMode + *out = new(string) + **out = **in + } + if in.TimeoutBeforeCheckingExecutionSpeed != nil { + in, out := &in.TimeoutBeforeCheckingExecutionSpeed, &out.TimeoutBeforeCheckingExecutionSpeed + *out = new(float64) + **out = **in + } + if in.TimeoutOverflowMode != nil { + in, out := &in.TimeoutOverflowMode, &out.TimeoutOverflowMode + *out = new(string) + **out = **in + } + if in.TransferOverflowMode != nil { + in, out := &in.TransferOverflowMode, &out.TransferOverflowMode + *out = new(string) + **out = **in + } + if in.TransformNullIn != nil { + in, out := &in.TransformNullIn, &out.TransformNullIn + *out = new(bool) + **out = **in + } + if in.UseHedgedRequests != nil { + in, out := &in.UseHedgedRequests, &out.UseHedgedRequests + *out = new(bool) + **out = **in + } + if in.UseUncompressedCache != nil { + in, out := &in.UseUncompressedCache, &out.UseUncompressedCache + *out = new(bool) + **out = **in + } + if in.WaitForAsyncInsert != nil { + in, out := &in.WaitForAsyncInsert, &out.WaitForAsyncInsert + *out = new(bool) + **out = **in + } + if in.WaitForAsyncInsertTimeout != nil { + in, out := 
&in.WaitForAsyncInsertTimeout, &out.WaitForAsyncInsertTimeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsParameters. +func (in *UserSettingsParameters) DeepCopy() *UserSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WiredTigerInitParameters) DeepCopyInto(out *WiredTigerInitParameters) { + *out = *in + if in.CacheSizeGb != nil { + in, out := &in.CacheSizeGb, &out.CacheSizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WiredTigerInitParameters. +func (in *WiredTigerInitParameters) DeepCopy() *WiredTigerInitParameters { + if in == nil { + return nil + } + out := new(WiredTigerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WiredTigerObservation) DeepCopyInto(out *WiredTigerObservation) { + *out = *in + if in.CacheSizeGb != nil { + in, out := &in.CacheSizeGb, &out.CacheSizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WiredTigerObservation. +func (in *WiredTigerObservation) DeepCopy() *WiredTigerObservation { + if in == nil { + return nil + } + out := new(WiredTigerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WiredTigerParameters) DeepCopyInto(out *WiredTigerParameters) { + *out = *in + if in.CacheSizeGb != nil { + in, out := &in.CacheSizeGb, &out.CacheSizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WiredTigerParameters. +func (in *WiredTigerParameters) DeepCopy() *WiredTigerParameters { + if in == nil { + return nil + } + out := new(WiredTigerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperInitParameters) DeepCopyInto(out *ZookeeperInitParameters) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ZookeeperResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperInitParameters. +func (in *ZookeeperInitParameters) DeepCopy() *ZookeeperInitParameters { + if in == nil { + return nil + } + out := new(ZookeeperInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperObservation) DeepCopyInto(out *ZookeeperObservation) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ZookeeperResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperObservation. 
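
The UserSettingsParameters and WiredTiger* stanzas that just closed are hundreds of near-identical blocks, so it is worth spelling out the one pattern they all repeat: for every optional pointer field, the generated DeepCopyInto allocates a fresh pointer and copies the pointed-to value, because the leading *out = *in copies only the pointer and would leave the copy aliasing the original. A minimal self-contained sketch of that pattern (the Settings type and MaxThreads field are illustrative, not taken from the patch); the generated code resumes below with ZookeeperObservation's DeepCopy:

	package main

	import "fmt"

	// Settings stands in for any generated *Parameters struct; optional
	// fields are pointers, with nil meaning "unset".
	type Settings struct {
		MaxThreads *float64
	}

	func (in *Settings) DeepCopyInto(out *Settings) {
		*out = *in // shallow copy: out.MaxThreads now aliases in.MaxThreads
		if in.MaxThreads != nil {
			in, out := &in.MaxThreads, &out.MaxThreads // shadow with the field addresses
			*out = new(float64)                        // fresh allocation for the copy
			**out = **in                               // copy the value, not the pointer
		}
	}

	func main() {
		v := 8.0
		a := Settings{MaxThreads: &v}
		b := Settings{}
		a.DeepCopyInto(&b)
		*b.MaxThreads = 16
		fmt.Println(*a.MaxThreads) // still 8: the copy does not alias the original
	}
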
+func (in *ZookeeperObservation) DeepCopy() *ZookeeperObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ZookeeperObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZookeeperParameters) DeepCopyInto(out *ZookeeperParameters) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ZookeeperResourcesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperParameters.
+func (in *ZookeeperParameters) DeepCopy() *ZookeeperParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ZookeeperParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZookeeperResourcesInitParameters) DeepCopyInto(out *ZookeeperResourcesInitParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperResourcesInitParameters.
+func (in *ZookeeperResourcesInitParameters) DeepCopy() *ZookeeperResourcesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ZookeeperResourcesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZookeeperResourcesObservation) DeepCopyInto(out *ZookeeperResourcesObservation) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperResourcesObservation.
+func (in *ZookeeperResourcesObservation) DeepCopy() *ZookeeperResourcesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ZookeeperResourcesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZookeeperResourcesParameters) DeepCopyInto(out *ZookeeperResourcesParameters) {
+	*out = *in
+	if in.DiskSize != nil {
+		in, out := &in.DiskSize, &out.DiskSize
+		*out = new(float64)
+		**out = **in
+	}
+	if in.DiskTypeID != nil {
+		in, out := &in.DiskTypeID, &out.DiskTypeID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourcePresetID != nil {
+		in, out := &in.ResourcePresetID, &out.ResourcePresetID
+		*out = new(string)
+		**out = **in
+	}
+}
+
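The Zookeeper* types above add the second recurring shape: a slice of nested structs. A plain assignment would share the backing array between original and copy, so the generator allocates a new slice with make and deep-copies element-wise. A compact sketch of that shape, with illustrative type names rather than the generated ones:

	package sketch

	type Resources struct {
		DiskSize *float64
	}

	func (in *Resources) DeepCopyInto(out *Resources) {
		*out = *in
		if in.DiskSize != nil {
			in, out := &in.DiskSize, &out.DiskSize
			*out = new(float64)
			**out = **in
		}
	}

	type Zookeeper struct {
		Resources []Resources
	}

	func (in *Zookeeper) DeepCopyInto(out *Zookeeper) {
		*out = *in
		if in.Resources != nil {
			in, out := &in.Resources, &out.Resources
			*out = make([]Resources, len(*in)) // new backing array for the copy
			for i := range *in {
				(*in)[i].DeepCopyInto(&(*out)[i]) // element-wise deep copy
			}
		}
	}

+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperResourcesParameters.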
+func (in *ZookeeperResourcesParameters) DeepCopy() *ZookeeperResourcesParameters { + if in == nil { + return nil + } + out := new(ZookeeperResourcesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/mdb/v1alpha1/zz_generated.resolvers.go b/apis/mdb/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..aca208f --- /dev/null +++ b/apis/mdb/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,1804 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this ClickhouseCluster. +func (mg *ClickhouseCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ElasticsearchCluster. +func (mg *ElasticsearchCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: 
&v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this GreenplumCluster. 
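
Before the GreenplumCluster resolver below, a note on the template every ResolveReferences in this file instantiates: wrap the Kubernetes reader in an APIResolver, then for each cross-resource field issue one Resolve (or ResolveMultiple) call and write the result back into both the value field and its *Ref. As crossplane-runtime's resolver implements it, an explicit *Ref wins, otherwise the *Selector picks a referenced object by labels, and if neither is set the CurrentValue passes through untouched. A condensed sketch of one such call, reusing this file's own imports (the helper name resolveNetworkID is hypothetical):

	func resolveNetworkID(ctx context.Context, c client.Reader, mg *GreenplumCluster) error {
		r := reference.NewAPIResolver(c, mg)
		rsp, err := r.Resolve(ctx, reference.ResolutionRequest{
			// Value already present in the spec, if any.
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID),
			// What to read off the referenced object: here its
			// crossplane.io/external-name annotation, i.e. the cloud-side ID.
			Extract: reference.ExternalName(),
			// Explicit reference by object name, if networkIdRef was set.
			Reference: mg.Spec.ForProvider.NetworkIDRef,
			// Label-selector fallback, if networkIdSelector was set.
			Selector: mg.Spec.ForProvider.NetworkIDSelector,
			// The kind this reference points at.
			To: reference.To{List: &v1alpha11.NetworkList{}, Managed: &v1alpha11.Network{}},
		})
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID")
		}
		// Persist both the resolved ID and the (possibly selector-chosen) reference.
		mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference
		return nil
	}
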
+func (mg *GreenplumCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + 
return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this KafkaCluster. +func (mg *KafkaCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + mrsp, err = 
r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIdsRefs, + Selector: mg.Spec.ForProvider.SubnetIdsSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIdsRefs, + Selector: mg.Spec.InitProvider.SubnetIdsSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this KafkaConnector. 
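
KafkaCluster above resolves whole lists, SecurityGroupIds and SubnetIds, with ResolveMultiple: each entry is resolved individually, or, when the selector is used, the list is populated from whatever matches. The FromPtrValues / ToPtrValues helpers around it only bridge shapes, converting between the []*string of the generated spec fields and the []string the resolver works with, with ToPtrValues returning freshly allocated pointers. A tiny sketch (demoPtrValues and ptrTo are local illustrations, not library functions); the KafkaConnector resolver follows below:

	func demoPtrValues() {
		ptrTo := func(s string) *string { return &s }
		ids := []*string{ptrTo("sg-1"), ptrTo("sg-2")} // shape of the generated spec fields
		plain := reference.FromPtrValues(ids)          // []string{"sg-1", "sg-2"}
		_ = reference.ToPtrValues(plain)               // fresh []*string with the same values
	}
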
+func (mg *KafkaConnector) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &KafkaClusterList{}, + Managed: &KafkaCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &KafkaClusterList{}, + Managed: &KafkaCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this KafkaTopic. +func (mg *KafkaTopic) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &KafkaClusterList{}, + Managed: &KafkaCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &KafkaClusterList{}, + Managed: &KafkaCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this KafkaUser. 
+func (mg *KafkaUser) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &KafkaClusterList{}, + Managed: &KafkaCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &KafkaClusterList{}, + Managed: &KafkaCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MongodbCluster. +func (mg *MongodbCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: 
reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MongodbDatabase. 
+func (mg *MongodbDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &MongodbClusterList{}, + Managed: &MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &MongodbClusterList{}, + Managed: &MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MongodbUser. +func (mg *MongodbUser) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &MongodbClusterList{}, + Managed: &MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &MongodbClusterList{}, + Managed: &MongodbCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MySQLCluster. 
+func (mg *MySQLCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MySQLDatabase. +func (mg *MySQLDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &MySQLClusterList{}, + Managed: &MySQLCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &MySQLClusterList{}, + Managed: &MySQLCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MySQLUser. 
+func (mg *MySQLUser) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.ClusterIDRef,
+		Selector:     mg.Spec.ForProvider.ClusterIDSelector,
+		To: reference.To{
+			List:    &MySQLClusterList{},
+			Managed: &MySQLCluster{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID")
+	}
+	mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Permission); i3++ {
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Permission[i3].DatabaseName),
+			Extract:      resource.ExtractParamPath("name", false),
+			Reference:    mg.Spec.ForProvider.Permission[i3].DatabaseNameRef,
+			Selector:     mg.Spec.ForProvider.Permission[i3].DatabaseNameSelector,
+			To: reference.To{
+				List:    &MySQLDatabaseList{},
+				Managed: &MySQLDatabase{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.ForProvider.Permission[i3].DatabaseName")
+		}
+		mg.Spec.ForProvider.Permission[i3].DatabaseName = reference.ToPtrValue(rsp.ResolvedValue)
+		mg.Spec.ForProvider.Permission[i3].DatabaseNameRef = rsp.ResolvedReference
+
+	}
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.ClusterIDRef,
+		Selector:     mg.Spec.InitProvider.ClusterIDSelector,
+		To: reference.To{
+			List:    &MySQLClusterList{},
+			Managed: &MySQLCluster{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID")
+	}
+	mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.InitProvider.Permission); i3++ {
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Permission[i3].DatabaseName),
+			Extract:      resource.ExtractParamPath("name", false),
+			Reference:    mg.Spec.InitProvider.Permission[i3].DatabaseNameRef,
+			Selector:     mg.Spec.InitProvider.Permission[i3].DatabaseNameSelector,
+			To: reference.To{
+				List:    &MySQLDatabaseList{},
+				Managed: &MySQLDatabase{},
+			},
+		})
+		if err != nil {
+			return errors.Wrap(err, "mg.Spec.InitProvider.Permission[i3].DatabaseName")
+		}
+		mg.Spec.InitProvider.Permission[i3].DatabaseName = reference.ToPtrValue(rsp.ResolvedValue)
+		mg.Spec.InitProvider.Permission[i3].DatabaseNameRef = rsp.ResolvedReference
+
+	}
+
+	return nil
+}
+
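MySQLUser above departs from the external-name default in one spot: Permission[i3].DatabaseName is resolved with resource.ExtractParamPath("name", false), which copies the referenced MySQLDatabase's name parameter out of its spec rather than its cloud-side ID (PostgresqlDatabase's Owner field further below does the same against PostgresqlUser). The Extract slot is just a function value; assuming crossplane-runtime's ExtractValueFn signature, a hand-written extractor looks like the sketch below (byObjectName and the aliased import are illustrative only, and the exact semantics of ExtractParamPath's boolean argument are upjet-internal and not verified here):

	// assumed import: xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	var byObjectName reference.ExtractValueFn = func(mg xpresource.Managed) string {
		// Resolve to the referenced object's Kubernetes name instead of
		// its external name or a spec parameter.
		return mg.GetName()
	}

+// ResolveReferences of this PostgresqlCluster.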
+func (mg *PostgresqlCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this PostgresqlDatabase. +func (mg *PostgresqlDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &PostgresqlClusterList{}, + Managed: &PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Owner), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.OwnerRef, + Selector: mg.Spec.ForProvider.OwnerSelector, + To: reference.To{ + List: &PostgresqlUserList{}, + Managed: &PostgresqlUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Owner") + } + mg.Spec.ForProvider.Owner = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OwnerRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &PostgresqlClusterList{}, + Managed: &PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + 
mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Owner), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.OwnerRef, + Selector: mg.Spec.InitProvider.OwnerSelector, + To: reference.To{ + List: &PostgresqlUserList{}, + Managed: &PostgresqlUser{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Owner") + } + mg.Spec.InitProvider.Owner = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OwnerRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this PostgresqlUser. +func (mg *PostgresqlUser) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterIDRef, + Selector: mg.Spec.ForProvider.ClusterIDSelector, + To: reference.To{ + List: &PostgresqlClusterList{}, + Managed: &PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterID") + } + mg.Spec.ForProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterIDRef, + Selector: mg.Spec.InitProvider.ClusterIDSelector, + To: reference.To{ + List: &PostgresqlClusterList{}, + Managed: &PostgresqlCluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterID") + } + mg.Spec.InitProvider.ClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RedisCluster. 
+func (mg *RedisCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this SqlserverCluster. +func (mg *SqlserverCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Host[i3].SubnetID") + } + mg.Spec.ForProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: 
&v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Host); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Host[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Host[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.Host[i3].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Host[i3].SubnetID") + } + mg.Spec.InitProvider.Host[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Host[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIdsRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIdsSelector, + To: reference.To{ + List: &v1alpha11.SecurityGroupList{}, + Managed: &v1alpha11.SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIdsRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/mdb/v1alpha1/zz_greenplumcluster_terraformed.go 
b/apis/mdb/v1alpha1/zz_greenplumcluster_terraformed.go new file mode 100755 index 0000000..bf3be04 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_greenplumcluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GreenplumCluster +func (mg *GreenplumCluster) GetTerraformResourceType() string { + return "yandex_mdb_greenplum_cluster" +} + +// GetConnectionDetailsMapping for this GreenplumCluster +func (tr *GreenplumCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"user_password": "userPasswordSecretRef"} +} + +// GetObservation of this GreenplumCluster +func (tr *GreenplumCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GreenplumCluster +func (tr *GreenplumCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GreenplumCluster +func (tr *GreenplumCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GreenplumCluster +func (tr *GreenplumCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GreenplumCluster +func (tr *GreenplumCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GreenplumCluster +func (tr *GreenplumCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this GreenplumCluster +func (tr *GreenplumCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, as we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
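+ // For example (hypothetical values, not part of the generated + // output): if spec.forProvider carries {"name": "prod"} and + // spec.initProvider carries {"name": "dev", "version": "6.25"}, the + // merge below keeps name from forProvider and only fills in the + // missing version, yielding {"name": "prod", "version": "6.25"}.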
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GreenplumCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GreenplumCluster) LateInitialize(attrs []byte) (bool, error) { + params := &GreenplumClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GreenplumCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_greenplumcluster_types.go b/apis/mdb/v1alpha1/zz_greenplumcluster_types.go index e8b584b..2e30b9a 100755 --- a/apis/mdb/v1alpha1/zz_greenplumcluster_types.go +++ b/apis/mdb/v1alpha1/zz_greenplumcluster_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,1124 +7,1009 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AnalyzeAndVacuumInitParameters struct { + // Maximum duration of the ANALYZE operation, in seconds. The default value is 36000. As soon as this period expires, the ANALYZE operation will be forced to terminate. + AnalyzeTimeout *float64 `json:"analyzeTimeout,omitempty" tf:"analyze_timeout,omitempty"` -// Maximum duration of the ANALYZE operation, in seconds. The default value is 36000. As soon as this period expires, the ANALYZE operation will be forced to terminate. -AnalyzeTimeout *float64 `json:"analyzeTimeout,omitempty" tf:"analyze_timeout,omitempty"` - -// Time of day in 'HH:MM' format when scripts should run. -StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + // Time of day in 'HH:MM' format when scripts should run. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` -// Maximum duration of the VACUUM operation, in seconds. The default value is 36000. As soon as this period expires, the VACUUM operation will be forced to terminate. -VacuumTimeout *float64 `json:"vacuumTimeout,omitempty" tf:"vacuum_timeout,omitempty"` + // Maximum duration of the VACUUM operation, in seconds. The default value is 36000. As soon as this period expires, the VACUUM operation will be forced to terminate. + VacuumTimeout *float64 `json:"vacuumTimeout,omitempty" tf:"vacuum_timeout,omitempty"` } - type AnalyzeAndVacuumObservation struct { + // Maximum duration of the ANALYZE operation, in seconds. The default value is 36000. As soon as this period expires, the ANALYZE operation will be forced to terminate. + AnalyzeTimeout *float64 `json:"analyzeTimeout,omitempty" tf:"analyze_timeout,omitempty"` -// Maximum duration of the ANALYZE operation, in seconds. The default value is 36000. As soon as this period expires, the ANALYZE operation will be forced to terminate. -AnalyzeTimeout *float64 `json:"analyzeTimeout,omitempty" tf:"analyze_timeout,omitempty"` - -// Time of day in 'HH:MM' format when scripts should run.
-StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + // Time of day in 'HH:MM' format when scripts should run. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` -// Maximum duration of the VACUUM operation, in seconds. The default value is 36000. As soon as this period expires, the VACUUM operation will be forced to terminate. -VacuumTimeout *float64 `json:"vacuumTimeout,omitempty" tf:"vacuum_timeout,omitempty"` + // Maximum duration of the VACUUM operation, in seconds. The default value is 36000. As soon as this period expires, the VACUUM operation will be forced to terminate. + VacuumTimeout *float64 `json:"vacuumTimeout,omitempty" tf:"vacuum_timeout,omitempty"` } - type AnalyzeAndVacuumParameters struct { + // Maximum duration of the ANALYZE operation, in seconds. The default value is 36000. As soon as this period expires, the ANALYZE operation will be forced to terminate. + // +kubebuilder:validation:Optional + AnalyzeTimeout *float64 `json:"analyzeTimeout,omitempty" tf:"analyze_timeout,omitempty"` -// Maximum duration of the ANALYZE operation, in seconds. The default value is 36000. As soon as this period expires, the ANALYZE operation will be forced to terminate. -// +kubebuilder:validation:Optional -AnalyzeTimeout *float64 `json:"analyzeTimeout,omitempty" tf:"analyze_timeout,omitempty"` - -// Time of day in 'HH:MM' format when scripts should run. -// +kubebuilder:validation:Optional -StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + // Time of day in 'HH:MM' format when scripts should run. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` -// Maximum duration of the VACUUM operation, in seconds. The default value is 36000. As soon as this period expires, the VACUUM operation will be forced to terminate. -// +kubebuilder:validation:Optional -VacuumTimeout *float64 `json:"vacuumTimeout,omitempty" tf:"vacuum_timeout,omitempty"` + // Maximum duration of the VACUUM operation, in seconds. The default value is 36000. As soon as this period expires, the VACUUM operation will be forced to terminate. + // +kubebuilder:validation:Optional + VacuumTimeout *float64 `json:"vacuumTimeout,omitempty" tf:"vacuum_timeout,omitempty"` } - type BackgroundActivitiesInitParameters struct { + // Block to configure 'ANALYZE' and 'VACUUM' daily operations. + AnalyzeAndVacuum []AnalyzeAndVacuumInitParameters `json:"analyzeAndVacuum,omitempty" tf:"analyze_and_vacuum,omitempty"` -// Block to configure 'ANALYZE' and 'VACUUM' daily operations. -AnalyzeAndVacuum []AnalyzeAndVacuumInitParameters `json:"analyzeAndVacuum,omitempty" tf:"analyze_and_vacuum,omitempty"` - -// Block to configure script that kills long running queries that are in idle state. -QueryKillerIdle []QueryKillerIdleInitParameters `json:"queryKillerIdle,omitempty" tf:"query_killer_idle,omitempty"` + // Block to configure script that kills long running queries that are in idle state. + QueryKillerIdle []QueryKillerIdleInitParameters `json:"queryKillerIdle,omitempty" tf:"query_killer_idle,omitempty"` -// block to configure script that kills long running queries that are in idle in transaction state. -QueryKillerIdleInTransaction []QueryKillerIdleInTransactionInitParameters `json:"queryKillerIdleInTransaction,omitempty" tf:"query_killer_idle_in_transaction,omitempty"` + // block to configure script that kills long running queries that are in idle in transaction state. 
+ QueryKillerIdleInTransaction []QueryKillerIdleInTransactionInitParameters `json:"queryKillerIdleInTransaction,omitempty" tf:"query_killer_idle_in_transaction,omitempty"` -// block to configure script that kills long running queries (in any state). -QueryKillerLongRunning []QueryKillerLongRunningInitParameters `json:"queryKillerLongRunning,omitempty" tf:"query_killer_long_running,omitempty"` + // block to configure script that kills long running queries (in any state). + QueryKillerLongRunning []QueryKillerLongRunningInitParameters `json:"queryKillerLongRunning,omitempty" tf:"query_killer_long_running,omitempty"` } - type BackgroundActivitiesObservation struct { + // Block to configure 'ANALYZE' and 'VACUUM' daily operations. + AnalyzeAndVacuum []AnalyzeAndVacuumObservation `json:"analyzeAndVacuum,omitempty" tf:"analyze_and_vacuum,omitempty"` -// Block to configure 'ANALYZE' and 'VACUUM' daily operations. -AnalyzeAndVacuum []AnalyzeAndVacuumObservation `json:"analyzeAndVacuum,omitempty" tf:"analyze_and_vacuum,omitempty"` - -// Block to configure script that kills long running queries that are in idle state. -QueryKillerIdle []QueryKillerIdleObservation `json:"queryKillerIdle,omitempty" tf:"query_killer_idle,omitempty"` + // Block to configure script that kills long running queries that are in idle state. + QueryKillerIdle []QueryKillerIdleObservation `json:"queryKillerIdle,omitempty" tf:"query_killer_idle,omitempty"` -// block to configure script that kills long running queries that are in idle in transaction state. -QueryKillerIdleInTransaction []QueryKillerIdleInTransactionObservation `json:"queryKillerIdleInTransaction,omitempty" tf:"query_killer_idle_in_transaction,omitempty"` + // block to configure script that kills long running queries that are in idle in transaction state. + QueryKillerIdleInTransaction []QueryKillerIdleInTransactionObservation `json:"queryKillerIdleInTransaction,omitempty" tf:"query_killer_idle_in_transaction,omitempty"` -// block to configure script that kills long running queries (in any state). -QueryKillerLongRunning []QueryKillerLongRunningObservation `json:"queryKillerLongRunning,omitempty" tf:"query_killer_long_running,omitempty"` + // block to configure script that kills long running queries (in any state). + QueryKillerLongRunning []QueryKillerLongRunningObservation `json:"queryKillerLongRunning,omitempty" tf:"query_killer_long_running,omitempty"` } - type BackgroundActivitiesParameters struct { + // Block to configure 'ANALYZE' and 'VACUUM' daily operations. + // +kubebuilder:validation:Optional + AnalyzeAndVacuum []AnalyzeAndVacuumParameters `json:"analyzeAndVacuum,omitempty" tf:"analyze_and_vacuum,omitempty"` -// Block to configure 'ANALYZE' and 'VACUUM' daily operations. -// +kubebuilder:validation:Optional -AnalyzeAndVacuum []AnalyzeAndVacuumParameters `json:"analyzeAndVacuum,omitempty" tf:"analyze_and_vacuum,omitempty"` - -// Block to configure script that kills long running queries that are in idle state. -// +kubebuilder:validation:Optional -QueryKillerIdle []QueryKillerIdleParameters `json:"queryKillerIdle,omitempty" tf:"query_killer_idle,omitempty"` + // Block to configure script that kills long running queries that are in idle state. + // +kubebuilder:validation:Optional + QueryKillerIdle []QueryKillerIdleParameters `json:"queryKillerIdle,omitempty" tf:"query_killer_idle,omitempty"` -// block to configure script that kills long running queries that are in idle in transaction state. 
-// +kubebuilder:validation:Optional -QueryKillerIdleInTransaction []QueryKillerIdleInTransactionParameters `json:"queryKillerIdleInTransaction,omitempty" tf:"query_killer_idle_in_transaction,omitempty"` + // block to configure script that kills long running queries that are in idle in transaction state. + // +kubebuilder:validation:Optional + QueryKillerIdleInTransaction []QueryKillerIdleInTransactionParameters `json:"queryKillerIdleInTransaction,omitempty" tf:"query_killer_idle_in_transaction,omitempty"` -// block to configure script that kills long running queries (in any state). -// +kubebuilder:validation:Optional -QueryKillerLongRunning []QueryKillerLongRunningParameters `json:"queryKillerLongRunning,omitempty" tf:"query_killer_long_running,omitempty"` + // block to configure script that kills long running queries (in any state). + // +kubebuilder:validation:Optional + QueryKillerLongRunning []QueryKillerLongRunningParameters `json:"queryKillerLongRunning,omitempty" tf:"query_killer_long_running,omitempty"` } - type GreenplumClusterAccessInitParameters struct { + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allows access for SQL queries in the management console. -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allows access for SQL queries in the management console. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` -// Allow access for Yandex Query -YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` + // Allow access for Yandex Query + YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` } - type GreenplumClusterAccessObservation struct { + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allows access for SQL queries in the management console. -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allows access for SQL queries in the management console. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` -// Allow access for Yandex Query -YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` + // Allow access for Yandex Query + YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` } - type GreenplumClusterAccessParameters struct { + // Allow access for Yandex DataLens. + // +kubebuilder:validation:Optional + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. 
-// +kubebuilder:validation:Optional -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -// +kubebuilder:validation:Optional -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + // +kubebuilder:validation:Optional + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allows access for SQL queries in the management console. -// +kubebuilder:validation:Optional -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allows access for SQL queries in the management console. + // +kubebuilder:validation:Optional + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` -// Allow access for Yandex Query -// +kubebuilder:validation:Optional -YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` + // Allow access for Yandex Query + // +kubebuilder:validation:Optional + YandexQuery *bool `json:"yandexQuery,omitempty" tf:"yandex_query,omitempty"` } - type GreenplumClusterBackupWindowStartInitParameters struct { + // The hour at which backup will be started (UTC). + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started (UTC). -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started (UTC). -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started (UTC). + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type GreenplumClusterBackupWindowStartObservation struct { + // The hour at which backup will be started (UTC). + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started (UTC). -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started (UTC). -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started (UTC). + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type GreenplumClusterBackupWindowStartParameters struct { + // The hour at which backup will be started (UTC). + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started (UTC). -// +kubebuilder:validation:Optional -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started (UTC). -// +kubebuilder:validation:Optional -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started (UTC). + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type GreenplumClusterCloudStorageInitParameters struct { - -// Whether to use cloud storage or not. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // Whether to use cloud storage or not. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` } - type GreenplumClusterCloudStorageObservation struct { - -// Whether to use cloud storage or not. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // Whether to use cloud storage or not. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` } - type GreenplumClusterCloudStorageParameters struct { - -// Whether to use cloud storage or not. 
-// +kubebuilder:validation:Optional -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // Whether to use cloud storage or not. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` } - type GreenplumClusterInitParameters struct { + // Access policy to the Greenplum cluster. The structure is documented below. + Access []GreenplumClusterAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"` -// Access policy to the Greenplum cluster. The structure is documented below. -Access []GreenplumClusterAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"` - -// Sets whether the master hosts should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // Sets whether the master hosts should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -BackgroundActivities []BackgroundActivitiesInitParameters `json:"backgroundActivities,omitempty" tf:"background_activities,omitempty"` + BackgroundActivities []BackgroundActivitiesInitParameters `json:"backgroundActivities,omitempty" tf:"background_activities,omitempty"` -// Time to start the daily backup, in the UTC timezone. The structure is documented below. -BackupWindowStart []GreenplumClusterBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + BackupWindowStart []GreenplumClusterBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` -// Cloud Storage settings of the Greenplum cluster. The structure is documented below. -CloudStorage []GreenplumClusterCloudStorageInitParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + // Cloud Storage settings of the Greenplum cluster. The structure is documented below. + CloudStorage []GreenplumClusterCloudStorageInitParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the Greenplum cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Greenplum cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Deployment environment of the Greenplum cluster. (PRODUCTION, PRESTABLE) -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Deployment environment of the Greenplum cluster. (PRODUCTION, PRESTABLE) + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resource belongs to. 
If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Greenplum cluster config. Detail info in "Greenplum cluster settings" section (documented below). -// +mapType=granular -GreenplumConfig map[string]*string `json:"greenplumConfig,omitempty" tf:"greenplum_config,omitempty"` + // Greenplum cluster config. Detail info in "Greenplum cluster settings" section (documented below). + // +mapType=granular + GreenplumConfig map[string]*string `json:"greenplumConfig,omitempty" tf:"greenplum_config,omitempty"` -// A set of key/value label pairs to assign to the Greenplum cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Greenplum cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Maintenance policy of the Greenplum cluster. The structure is documented below. -MaintenanceWindow []GreenplumClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + // Maintenance policy of the Greenplum cluster. The structure is documented below. + MaintenanceWindow []GreenplumClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// Number of hosts in master subcluster (1 or 2). -MasterHostCount *float64 `json:"masterHostCount,omitempty" tf:"master_host_count,omitempty"` + // Number of hosts in master subcluster (1 or 2). + MasterHostCount *float64 `json:"masterHostCount,omitempty" tf:"master_host_count,omitempty"` -// A list of IDs of the host groups to place master subclusters' VMs of the cluster on. -// +listType=set -MasterHostGroupIds []*string `json:"masterHostGroupIds,omitempty" tf:"master_host_group_ids,omitempty"` + // A list of IDs of the host groups to place master subclusters' VMs of the cluster on. + // +listType=set + MasterHostGroupIds []*string `json:"masterHostGroupIds,omitempty" tf:"master_host_group_ids,omitempty"` -// Settings for master subcluster. The structure is documented below. -MasterSubcluster []MasterSubclusterInitParameters `json:"masterSubcluster,omitempty" tf:"master_subcluster,omitempty"` + // Settings for master subcluster. The structure is documented below. + MasterSubcluster []MasterSubclusterInitParameters `json:"masterSubcluster,omitempty" tf:"master_subcluster,omitempty"` -// Name of the Greenplum cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Greenplum cluster. Provided by the client when the cluster is created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network, to which the Greenplum cluster uses. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network to which the Greenplum cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` -// Configuration of the connection pooler. The structure is documented below. -PoolerConfig []PoolerConfigInitParameters `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` + // Configuration of the connection pooler. The structure is documented below. + PoolerConfig []PoolerConfigInitParameters `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` -// Configuration of the PXF daemon. The structure is documented below. -PxfConfig []PxfConfigInitParameters `json:"pxfConfig,omitempty" tf:"pxf_config,omitempty"` + // Configuration of the PXF daemon. The structure is documented below. + PxfConfig []PxfConfigInitParameters `json:"pxfConfig,omitempty" tf:"pxf_config,omitempty"` -// A set of ids of security groups assigned to hosts of the cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` -// Number of hosts in segment subcluster (from 1 to 32). -SegmentHostCount *float64 `json:"segmentHostCount,omitempty" tf:"segment_host_count,omitempty"` + // Number of hosts in segment subcluster (from 1 to 32).
+ SegmentHostCount *float64 `json:"segmentHostCount,omitempty" tf:"segment_host_count,omitempty"` -// A list of IDs of the host groups to place segment subclusters' VMs of the cluster on. -// +listType=set -SegmentHostGroupIds []*string `json:"segmentHostGroupIds,omitempty" tf:"segment_host_group_ids,omitempty"` + // A list of IDs of the host groups to place segment subclusters' VMs of the cluster on. + // +listType=set + SegmentHostGroupIds []*string `json:"segmentHostGroupIds,omitempty" tf:"segment_host_group_ids,omitempty"` -// Number of segments on segment host (not more then 1 + RAM/8). -SegmentInHost *float64 `json:"segmentInHost,omitempty" tf:"segment_in_host,omitempty"` + // Number of segments on segment host (not more than 1 + RAM/8). + SegmentInHost *float64 `json:"segmentInHost,omitempty" tf:"segment_in_host,omitempty"` -// Settings for segment subcluster. The structure is documented below. -SegmentSubcluster []SegmentSubclusterInitParameters `json:"segmentSubcluster,omitempty" tf:"segment_subcluster,omitempty"` + // Settings for segment subcluster. The structure is documented below. + SegmentSubcluster []SegmentSubclusterInitParameters `json:"segmentSubcluster,omitempty" tf:"segment_subcluster,omitempty"` -// The ID of the subnet, to which the hosts belongs. The subnet must be a part of the network to which the cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet to which the hosts belong. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Greenplum cluster admin user name. -UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` + // Greenplum cluster admin user name. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` -// Greenplum cluster admin password name. -UserPasswordSecretRef v1.SecretKeySelector `json:"userPasswordSecretRef" tf:"-"` + // Greenplum cluster admin user password. + UserPasswordSecretRef v1.SecretKeySelector `json:"userPasswordSecretRef" tf:"-"` -// Version of the Greenplum cluster. (6.22 or 6.25) -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version of the Greenplum cluster. (6.22 or 6.25) + Version *string `json:"version,omitempty" tf:"version,omitempty"` -// The availability zone where the Greenplum hosts will be created.
+ Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type GreenplumClusterMaintenanceWindowInitParameters struct { + // Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23. -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of the day in UTC (in HH format). Allowed value is between 0 and 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type GreenplumClusterMaintenanceWindowObservation struct { + // Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23. -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of the day in UTC (in HH format). Allowed value is between 0 and 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` } - type GreenplumClusterMaintenanceWindowParameters struct { + // Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" + // +kubebuilder:validation:Optional + Day *string `json:"day,omitempty" tf:"day,omitempty"` -// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" -// +kubebuilder:validation:Optional -Day *string `json:"day,omitempty" tf:"day,omitempty"` - -// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23. -// +kubebuilder:validation:Optional -Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + // Hour of the day in UTC (in HH format). Allowed value is between 0 and 23. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` -// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. -// +kubebuilder:validation:Optional -Type *string `json:"type" tf:"type,omitempty"` + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` } - type GreenplumClusterObservation struct { + // Access policy to the Greenplum cluster. The structure is documented below. 
+ Access []GreenplumClusterAccessObservation `json:"access,omitempty" tf:"access,omitempty"` -// Access policy to the Greenplum cluster. The structure is documented below. -Access []GreenplumClusterAccessObservation `json:"access,omitempty" tf:"access,omitempty"` - -// Sets whether the master hosts should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // Sets whether the master hosts should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -BackgroundActivities []BackgroundActivitiesObservation `json:"backgroundActivities,omitempty" tf:"background_activities,omitempty"` + BackgroundActivities []BackgroundActivitiesObservation `json:"backgroundActivities,omitempty" tf:"background_activities,omitempty"` -// Time to start the daily backup, in the UTC timezone. The structure is documented below. -BackupWindowStart []GreenplumClusterBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + BackupWindowStart []GreenplumClusterBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` -// Cloud Storage settings of the Greenplum cluster. The structure is documented below. -CloudStorage []GreenplumClusterCloudStorageObservation `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + // Cloud Storage settings of the Greenplum cluster. The structure is documented below. + CloudStorage []GreenplumClusterCloudStorageObservation `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` -// Creation timestamp of the cluster. -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // Creation timestamp of the cluster. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the Greenplum cluster. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Greenplum cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Deployment environment of the Greenplum cluster. (PRODUCTION, PRESTABLE) -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Deployment environment of the Greenplum cluster. (PRODUCTION, PRESTABLE) + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Greenplum cluster config. Detail info in "Greenplum cluster settings" section (documented below). 
-// +mapType=granular -GreenplumConfig map[string]*string `json:"greenplumConfig,omitempty" tf:"greenplum_config,omitempty"` + // Greenplum cluster config. Detail info in "Greenplum cluster settings" section (documented below). + // +mapType=granular + GreenplumConfig map[string]*string `json:"greenplumConfig,omitempty" tf:"greenplum_config,omitempty"` -// Aggregated health of the cluster. -Health *string `json:"health,omitempty" tf:"health,omitempty"` + // Aggregated health of the cluster. + Health *string `json:"health,omitempty" tf:"health,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Greenplum cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Greenplum cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Maintenance policy of the Greenplum cluster. The structure is documented below. -MaintenanceWindow []GreenplumClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + // Maintenance policy of the Greenplum cluster. The structure is documented below. + MaintenanceWindow []GreenplumClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// Number of hosts in master subcluster (1 or 2). -MasterHostCount *float64 `json:"masterHostCount,omitempty" tf:"master_host_count,omitempty"` + // Number of hosts in master subcluster (1 or 2). + MasterHostCount *float64 `json:"masterHostCount,omitempty" tf:"master_host_count,omitempty"` -// A list of IDs of the host groups to place master subclusters' VMs of the cluster on. -// +listType=set -MasterHostGroupIds []*string `json:"masterHostGroupIds,omitempty" tf:"master_host_group_ids,omitempty"` + // A list of IDs of the host groups to place master subclusters' VMs of the cluster on. + // +listType=set + MasterHostGroupIds []*string `json:"masterHostGroupIds,omitempty" tf:"master_host_group_ids,omitempty"` -// (Computed) Info about hosts in master subcluster. The structure is documented below. -MasterHosts []MasterHostsObservation `json:"masterHosts,omitempty" tf:"master_hosts,omitempty"` + // (Computed) Info about hosts in master subcluster. The structure is documented below. + MasterHosts []MasterHostsObservation `json:"masterHosts,omitempty" tf:"master_hosts,omitempty"` -// Settings for master subcluster. The structure is documented below. -MasterSubcluster []MasterSubclusterObservation `json:"masterSubcluster,omitempty" tf:"master_subcluster,omitempty"` + // Settings for master subcluster. The structure is documented below. + MasterSubcluster []MasterSubclusterObservation `json:"masterSubcluster,omitempty" tf:"master_subcluster,omitempty"` -// Name of the Greenplum cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Greenplum cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network, to which the Greenplum cluster uses. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network to which the Greenplum cluster belongs. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// Configuration of the connection pooler.
The structure is documented below. -PoolerConfig []PoolerConfigObservation `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` + // Configuration of the connection pooler. The structure is documented below. + PoolerConfig []PoolerConfigObservation `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` -// Configuration of the PXF daemon. The structure is documented below. -PxfConfig []PxfConfigObservation `json:"pxfConfig,omitempty" tf:"pxf_config,omitempty"` + // Configuration of the PXF daemon. The structure is documented below. + PxfConfig []PxfConfigObservation `json:"pxfConfig,omitempty" tf:"pxf_config,omitempty"` -// A set of ids of security groups assigned to hosts of the cluster. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A set of IDs of security groups assigned to hosts of the cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// Number of hosts in segment subcluster (from 1 to 32). -SegmentHostCount *float64 `json:"segmentHostCount,omitempty" tf:"segment_host_count,omitempty"` + // Number of hosts in segment subcluster (from 1 to 32). + SegmentHostCount *float64 `json:"segmentHostCount,omitempty" tf:"segment_host_count,omitempty"` -// A list of IDs of the host groups to place segment subclusters' VMs of the cluster on. -// +listType=set -SegmentHostGroupIds []*string `json:"segmentHostGroupIds,omitempty" tf:"segment_host_group_ids,omitempty"` + // A list of IDs of the host groups to place segment subclusters' VMs of the cluster on. + // +listType=set + SegmentHostGroupIds []*string `json:"segmentHostGroupIds,omitempty" tf:"segment_host_group_ids,omitempty"` -// (Computed) Info about hosts in segment subcluster. The structure is documented below. -SegmentHosts []SegmentHostsObservation `json:"segmentHosts,omitempty" tf:"segment_hosts,omitempty"` + // (Computed) Info about hosts in segment subcluster. The structure is documented below. + SegmentHosts []SegmentHostsObservation `json:"segmentHosts,omitempty" tf:"segment_hosts,omitempty"` -// Number of segments on segment host (not more then 1 + RAM/8). -SegmentInHost *float64 `json:"segmentInHost,omitempty" tf:"segment_in_host,omitempty"` + // Number of segments on segment host (not more than 1 + RAM/8). + SegmentInHost *float64 `json:"segmentInHost,omitempty" tf:"segment_in_host,omitempty"` -// Settings for segment subcluster. The structure is documented below. -SegmentSubcluster []SegmentSubclusterObservation `json:"segmentSubcluster,omitempty" tf:"segment_subcluster,omitempty"` + // Settings for segment subcluster. The structure is documented below. + SegmentSubcluster []SegmentSubclusterObservation `json:"segmentSubcluster,omitempty" tf:"segment_subcluster,omitempty"` -// Status of the cluster. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // Status of the cluster. + Status *string `json:"status,omitempty" tf:"status,omitempty"` -// The ID of the subnet, to which the hosts belongs. The subnet must be a part of the network to which the cluster belongs. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet to which the hosts belong. The subnet must be a part of the network to which the cluster belongs. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Greenplum cluster admin user name.
-UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` + // Greenplum cluster admin user name. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` -// Version of the Greenplum cluster. (6.22 or 6.25) -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version of the Greenplum cluster. (6.22 or 6.25) + Version *string `json:"version,omitempty" tf:"version,omitempty"` -// The availability zone where the Greenplum hosts will be created. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // The availability zone where the Greenplum hosts will be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type GreenplumClusterParameters struct { - -// Access policy to the Greenplum cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Access []GreenplumClusterAccessParameters `json:"access,omitempty" tf:"access,omitempty"` - -// Sets whether the master hosts should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment. -// +kubebuilder:validation:Optional -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` - -// +kubebuilder:validation:Optional -BackgroundActivities []BackgroundActivitiesParameters `json:"backgroundActivities,omitempty" tf:"background_activities,omitempty"` - -// Time to start the daily backup, in the UTC timezone. The structure is documented below. -// +kubebuilder:validation:Optional -BackupWindowStart []GreenplumClusterBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` - -// Cloud Storage settings of the Greenplum cluster. The structure is documented below. -// +kubebuilder:validation:Optional -CloudStorage []GreenplumClusterCloudStorageParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` - -// Inhibits deletion of the cluster. Can be either true or false. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` - -// Description of the Greenplum cluster. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// Deployment environment of the Greenplum cluster. (PRODUCTION, PRESTABLE) -// +kubebuilder:validation:Optional -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` - -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` - -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` - -// Greenplum cluster config. Detail info in "Greenplum cluster settings" section (documented below). -// +kubebuilder:validation:Optional -// +mapType=granular -GreenplumConfig map[string]*string `json:"greenplumConfig,omitempty" tf:"greenplum_config,omitempty"` - -// A set of key/value label pairs to assign to the Greenplum cluster. 
-// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// Maintenance policy of the Greenplum cluster. The structure is documented below. -// +kubebuilder:validation:Optional -MaintenanceWindow []GreenplumClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` - -// Number of hosts in master subcluster (1 or 2). -// +kubebuilder:validation:Optional -MasterHostCount *float64 `json:"masterHostCount,omitempty" tf:"master_host_count,omitempty"` - -// A list of IDs of the host groups to place master subclusters' VMs of the cluster on. -// +kubebuilder:validation:Optional -// +listType=set -MasterHostGroupIds []*string `json:"masterHostGroupIds,omitempty" tf:"master_host_group_ids,omitempty"` - -// Settings for master subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -MasterSubcluster []MasterSubclusterParameters `json:"masterSubcluster,omitempty" tf:"master_subcluster,omitempty"` - -// Name of the Greenplum cluster. Provided by the client when the cluster is created. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// ID of the network, to which the Greenplum cluster uses. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` - -// Reference to a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` - -// Selector for a Network in vpc to populate networkId. -// +kubebuilder:validation:Optional -NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` - -// Configuration of the connection pooler. The structure is documented below. -// +kubebuilder:validation:Optional -PoolerConfig []PoolerConfigParameters `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` - -// Configuration of the PXF daemon. The structure is documented below. -// +kubebuilder:validation:Optional -PxfConfig []PxfConfigParameters `json:"pxfConfig,omitempty" tf:"pxf_config,omitempty"` - -// A set of ids of security groups assigned to hosts of the cluster. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup -// +kubebuilder:validation:Optional -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` - -// References to SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` - -// Selector for a list of SecurityGroup in vpc to populate securityGroupIds. -// +kubebuilder:validation:Optional -SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` - -// Number of hosts in segment subcluster (from 1 to 32). -// +kubebuilder:validation:Optional -SegmentHostCount *float64 `json:"segmentHostCount,omitempty" tf:"segment_host_count,omitempty"` - -// A list of IDs of the host groups to place segment subclusters' VMs of the cluster on. -// +kubebuilder:validation:Optional -// +listType=set -SegmentHostGroupIds []*string `json:"segmentHostGroupIds,omitempty" tf:"segment_host_group_ids,omitempty"` - -// Number of segments on segment host (not more then 1 + RAM/8). 
-// +kubebuilder:validation:Optional -SegmentInHost *float64 `json:"segmentInHost,omitempty" tf:"segment_in_host,omitempty"` - -// Settings for segment subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -SegmentSubcluster []SegmentSubclusterParameters `json:"segmentSubcluster,omitempty" tf:"segment_subcluster,omitempty"` - -// The ID of the subnet, to which the hosts belongs. The subnet must be a part of the network to which the cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` - -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` - -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// Greenplum cluster admin user name. -// +kubebuilder:validation:Optional -UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` - -// Greenplum cluster admin password name. -// +kubebuilder:validation:Optional -UserPasswordSecretRef v1.SecretKeySelector `json:"userPasswordSecretRef" tf:"-"` - -// Version of the Greenplum cluster. (6.22 or 6.25) -// +kubebuilder:validation:Optional -Version *string `json:"version,omitempty" tf:"version,omitempty"` - -// The availability zone where the Greenplum hosts will be created. -// +kubebuilder:validation:Optional -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // Access policy to the Greenplum cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Access []GreenplumClusterAccessParameters `json:"access,omitempty" tf:"access,omitempty"` + + // Sets whether the master hosts should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // +kubebuilder:validation:Optional + BackgroundActivities []BackgroundActivitiesParameters `json:"backgroundActivities,omitempty" tf:"background_activities,omitempty"` + + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + // +kubebuilder:validation:Optional + BackupWindowStart []GreenplumClusterBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + + // Cloud Storage settings of the Greenplum cluster. The structure is documented below. + // +kubebuilder:validation:Optional + CloudStorage []GreenplumClusterCloudStorageParameters `json:"cloudStorage,omitempty" tf:"cloud_storage,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Greenplum cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Deployment environment of the Greenplum cluster. (PRODUCTION, PRESTABLE) + // +kubebuilder:validation:Optional + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Greenplum cluster config. Detailed info in the "Greenplum cluster settings" section (documented below). + // +kubebuilder:validation:Optional + // +mapType=granular + GreenplumConfig map[string]*string `json:"greenplumConfig,omitempty" tf:"greenplum_config,omitempty"` + + // A set of key/value label pairs to assign to the Greenplum cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Maintenance policy of the Greenplum cluster. The structure is documented below. + // +kubebuilder:validation:Optional + MaintenanceWindow []GreenplumClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Number of hosts in master subcluster (1 or 2). + // +kubebuilder:validation:Optional + MasterHostCount *float64 `json:"masterHostCount,omitempty" tf:"master_host_count,omitempty"` + + // A list of IDs of the host groups to place master subclusters' VMs of the cluster on. + // +kubebuilder:validation:Optional + // +listType=set + MasterHostGroupIds []*string `json:"masterHostGroupIds,omitempty" tf:"master_host_group_ids,omitempty"` + + // Settings for master subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + MasterSubcluster []MasterSubclusterParameters `json:"masterSubcluster,omitempty" tf:"master_subcluster,omitempty"` + + // Name of the Greenplum cluster. Provided by the client when the cluster is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network to which the Greenplum cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // Configuration of the connection pooler. The structure is documented below. + // +kubebuilder:validation:Optional + PoolerConfig []PoolerConfigParameters `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` + + // Configuration of the PXF daemon. The structure is documented below. + // +kubebuilder:validation:Optional + PxfConfig []PxfConfigParameters `json:"pxfConfig,omitempty" tf:"pxf_config,omitempty"` + + // A set of IDs of security groups assigned to hosts of the cluster.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + + // Number of hosts in segment subcluster (from 1 to 32). + // +kubebuilder:validation:Optional + SegmentHostCount *float64 `json:"segmentHostCount,omitempty" tf:"segment_host_count,omitempty"` + + // A list of IDs of the host groups to place segment subclusters' VMs of the cluster on. + // +kubebuilder:validation:Optional + // +listType=set + SegmentHostGroupIds []*string `json:"segmentHostGroupIds,omitempty" tf:"segment_host_group_ids,omitempty"` + + // Number of segments on segment host (not more than 1 + RAM/8). + // +kubebuilder:validation:Optional + SegmentInHost *float64 `json:"segmentInHost,omitempty" tf:"segment_in_host,omitempty"` + + // Settings for segment subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + SegmentSubcluster []SegmentSubclusterParameters `json:"segmentSubcluster,omitempty" tf:"segment_subcluster,omitempty"` + + // The ID of the subnet to which the hosts belong. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Greenplum cluster admin user name. + // +kubebuilder:validation:Optional + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` + + // Greenplum cluster admin user password. + // +kubebuilder:validation:Optional + UserPasswordSecretRef v1.SecretKeySelector `json:"userPasswordSecretRef" tf:"-"` + + // Version of the Greenplum cluster. (6.22 or 6.25) + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The availability zone where the Greenplum hosts will be created. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type MasterHostsInitParameters struct { - } - type MasterHostsObservation struct { + // (Computed) Flag indicating that master hosts should be created with a public IP address. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// (Computed) Flag indicating that master hosts should be created with a public IP address. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` - -// (Computed) The fully qualified domain name of the host. -Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + // (Computed) The fully qualified domain name of the host.
+ Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` } - type MasterHostsParameters struct { - } - type MasterSubclusterInitParameters struct { - -// Resources allocated to hosts for master subcluster of the Greenplum cluster. The structure is documented below. -Resources []MasterSubclusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts for master subcluster of the Greenplum cluster. The structure is documented below. + Resources []MasterSubclusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` } - type MasterSubclusterObservation struct { - -// Resources allocated to hosts for master subcluster of the Greenplum cluster. The structure is documented below. -Resources []MasterSubclusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts for master subcluster of the Greenplum cluster. The structure is documented below. + Resources []MasterSubclusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` } - type MasterSubclusterParameters struct { - -// Resources allocated to hosts for master subcluster of the Greenplum cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Resources []MasterSubclusterResourcesParameters `json:"resources" tf:"resources,omitempty"` + // Resources allocated to hosts for master subcluster of the Greenplum cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []MasterSubclusterResourcesParameters `json:"resources" tf:"resources,omitempty"` } - type MasterSubclusterResourcesInitParameters struct { + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type MasterSubclusterResourcesObservation struct { + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type MasterSubclusterResourcesParameters struct { + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` - -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` } - type PoolerConfigInitParameters struct { + // Value for pool_client_idle_timeout parameter 
in Odyssey. + PoolClientIdleTimeout *float64 `json:"poolClientIdleTimeout,omitempty" tf:"pool_client_idle_timeout,omitempty"` -// Value for pool_client_idle_timeout parameter in Odyssey. -PoolClientIdleTimeout *float64 `json:"poolClientIdleTimeout,omitempty" tf:"pool_client_idle_timeout,omitempty"` + // Value for pool_size parameter in Odyssey. + PoolSize *float64 `json:"poolSize,omitempty" tf:"pool_size,omitempty"` -// Value for pool_size parameter in Odyssey. -PoolSize *float64 `json:"poolSize,omitempty" tf:"pool_size,omitempty"` - -// Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string. -PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` + // Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` } - type PoolerConfigObservation struct { + // Value for pool_client_idle_timeout parameter in Odyssey. + PoolClientIdleTimeout *float64 `json:"poolClientIdleTimeout,omitempty" tf:"pool_client_idle_timeout,omitempty"` -// Value for pool_client_idle_timeout parameter in Odyssey. -PoolClientIdleTimeout *float64 `json:"poolClientIdleTimeout,omitempty" tf:"pool_client_idle_timeout,omitempty"` + // Value for pool_size parameter in Odyssey. + PoolSize *float64 `json:"poolSize,omitempty" tf:"pool_size,omitempty"` -// Value for pool_size parameter in Odyssey. -PoolSize *float64 `json:"poolSize,omitempty" tf:"pool_size,omitempty"` - -// Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string. -PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` + // Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` } - type PoolerConfigParameters struct { + // Value for pool_client_idle_timeout parameter in Odyssey. + // +kubebuilder:validation:Optional + PoolClientIdleTimeout *float64 `json:"poolClientIdleTimeout,omitempty" tf:"pool_client_idle_timeout,omitempty"` -// Value for pool_client_idle_timeout parameter in Odyssey. -// +kubebuilder:validation:Optional -PoolClientIdleTimeout *float64 `json:"poolClientIdleTimeout,omitempty" tf:"pool_client_idle_timeout,omitempty"` + // Value for pool_size parameter in Odyssey. + // +kubebuilder:validation:Optional + PoolSize *float64 `json:"poolSize,omitempty" tf:"pool_size,omitempty"` -// Value for pool_size parameter in Odyssey. -// +kubebuilder:validation:Optional -PoolSize *float64 `json:"poolSize,omitempty" tf:"pool_size,omitempty"` - -// Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string. -// +kubebuilder:validation:Optional -PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` + // Mode that the connection pooler is working in.
See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + // +kubebuilder:validation:Optional + PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` } - type PxfConfigInitParameters struct { + // The Tomcat server connection timeout for read operations in seconds. Value is between 5 and 600. + ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` -// The Tomcat server connection timeout for read operations in seconds. Value is between 5 and 600. -ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` + // The maximum number of PXF Tomcat threads. Value is between 1 and 1024. + MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` -// The maximum number of PXF tomcat threads. Value is between 1 and 1024. -MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` + // Identifies whether or not core streaming threads are allowed to time out. + PoolAllowCoreThreadTimeout *bool `json:"poolAllowCoreThreadTimeout,omitempty" tf:"pool_allow_core_thread_timeout,omitempty"` -// Identifies whether or not core streaming threads are allowed to time out. -PoolAllowCoreThreadTimeout *bool `json:"poolAllowCoreThreadTimeout,omitempty" tf:"pool_allow_core_thread_timeout,omitempty"` + // The number of core streaming threads. Value is between 1 and 1024. + PoolCoreSize *float64 `json:"poolCoreSize,omitempty" tf:"pool_core_size,omitempty"` -// The number of core streaming threads. Value is between 1 and 1024. -PoolCoreSize *float64 `json:"poolCoreSize,omitempty" tf:"pool_core_size,omitempty"` + // The maximum allowed number of core streaming threads. Value is between 1 and 1024. + PoolMaxSize *float64 `json:"poolMaxSize,omitempty" tf:"pool_max_size,omitempty"` -// The maximum allowed number of core streaming threads. Value is between 1 and 1024. -PoolMaxSize *float64 `json:"poolMaxSize,omitempty" tf:"pool_max_size,omitempty"` + // The capacity of the core streaming thread pool queue. Value is positive. + PoolQueueCapacity *float64 `json:"poolQueueCapacity,omitempty" tf:"pool_queue_capacity,omitempty"` -// The capacity of the core streaming thread pool queue. Value is positive. -PoolQueueCapacity *float64 `json:"poolQueueCapacity,omitempty" tf:"pool_queue_capacity,omitempty"` + // The Tomcat server connection timeout for write operations in seconds. Value is between 5 and 600. + UploadTimeout *float64 `json:"uploadTimeout,omitempty" tf:"upload_timeout,omitempty"` -// The Tomcat server connection timeout for write operations in seconds. Value is between 5 and 600. -UploadTimeout *float64 `json:"uploadTimeout,omitempty" tf:"upload_timeout,omitempty"` + // Initial JVM heap size for PXF daemon. Value is between 64 and 16384. + Xms *float64 `json:"xms,omitempty" tf:"xms,omitempty"` -// Maximum JVM heap size for PXF daemon. Value is between 64 and 16384. -Xms *float64 `json:"xms,omitempty" tf:"xms,omitempty"` - -// Initial JVM heap size for PXF daemon. Value is between 64 and 16384. -Xmx *float64 `json:"xmx,omitempty" tf:"xmx,omitempty"` + // Maximum JVM heap size for PXF daemon. Value is between 64 and 16384. + Xmx *float64 `json:"xmx,omitempty" tf:"xmx,omitempty"` } - type PxfConfigObservation struct { + // The Tomcat server connection timeout for read operations in seconds. Value is between 5 and 600.
+ ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` -// The Tomcat server connection timeout for read operations in seconds. Value is between 5 and 600. -ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` + // The maximum number of PXF Tomcat threads. Value is between 1 and 1024. + MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` -// The maximum number of PXF tomcat threads. Value is between 1 and 1024. -MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` + // Identifies whether or not core streaming threads are allowed to time out. + PoolAllowCoreThreadTimeout *bool `json:"poolAllowCoreThreadTimeout,omitempty" tf:"pool_allow_core_thread_timeout,omitempty"` -// Identifies whether or not core streaming threads are allowed to time out. -PoolAllowCoreThreadTimeout *bool `json:"poolAllowCoreThreadTimeout,omitempty" tf:"pool_allow_core_thread_timeout,omitempty"` + // The number of core streaming threads. Value is between 1 and 1024. + PoolCoreSize *float64 `json:"poolCoreSize,omitempty" tf:"pool_core_size,omitempty"` -// The number of core streaming threads. Value is between 1 and 1024. -PoolCoreSize *float64 `json:"poolCoreSize,omitempty" tf:"pool_core_size,omitempty"` + // The maximum allowed number of core streaming threads. Value is between 1 and 1024. + PoolMaxSize *float64 `json:"poolMaxSize,omitempty" tf:"pool_max_size,omitempty"` -// The maximum allowed number of core streaming threads. Value is between 1 and 1024. -PoolMaxSize *float64 `json:"poolMaxSize,omitempty" tf:"pool_max_size,omitempty"` + // The capacity of the core streaming thread pool queue. Value is positive. + PoolQueueCapacity *float64 `json:"poolQueueCapacity,omitempty" tf:"pool_queue_capacity,omitempty"` -// The capacity of the core streaming thread pool queue. Value is positive. -PoolQueueCapacity *float64 `json:"poolQueueCapacity,omitempty" tf:"pool_queue_capacity,omitempty"` + // The Tomcat server connection timeout for write operations in seconds. Value is between 5 and 600. + UploadTimeout *float64 `json:"uploadTimeout,omitempty" tf:"upload_timeout,omitempty"` -// The Tomcat server connection timeout for write operations in seconds. Value is between 5 and 600. -UploadTimeout *float64 `json:"uploadTimeout,omitempty" tf:"upload_timeout,omitempty"` + // Initial JVM heap size for PXF daemon. Value is between 64 and 16384. + Xms *float64 `json:"xms,omitempty" tf:"xms,omitempty"` -// Maximum JVM heap size for PXF daemon. Value is between 64 and 16384. -Xms *float64 `json:"xms,omitempty" tf:"xms,omitempty"` - -// Initial JVM heap size for PXF daemon. Value is between 64 and 16384. -Xmx *float64 `json:"xmx,omitempty" tf:"xmx,omitempty"` + // Maximum JVM heap size for PXF daemon. Value is between 64 and 16384. + Xmx *float64 `json:"xmx,omitempty" tf:"xmx,omitempty"` } - type PxfConfigParameters struct { + // The Tomcat server connection timeout for read operations in seconds. Value is between 5 and 600. + // +kubebuilder:validation:Optional + ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` -// The Tomcat server connection timeout for read operations in seconds. Value is between 5 and 600. -// +kubebuilder:validation:Optional -ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` + // The maximum number of PXF Tomcat threads. Value is between 1 and 1024.
+ // +kubebuilder:validation:Optional + MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` -// The maximum number of PXF tomcat threads. Value is between 1 and 1024. -// +kubebuilder:validation:Optional -MaxThreads *float64 `json:"maxThreads,omitempty" tf:"max_threads,omitempty"` + // Identifies whether or not core streaming threads are allowed to time out. + // +kubebuilder:validation:Optional + PoolAllowCoreThreadTimeout *bool `json:"poolAllowCoreThreadTimeout,omitempty" tf:"pool_allow_core_thread_timeout,omitempty"` -// Identifies whether or not core streaming threads are allowed to time out. -// +kubebuilder:validation:Optional -PoolAllowCoreThreadTimeout *bool `json:"poolAllowCoreThreadTimeout,omitempty" tf:"pool_allow_core_thread_timeout,omitempty"` + // The number of core streaming threads. Value is between 1 and 1024. + // +kubebuilder:validation:Optional + PoolCoreSize *float64 `json:"poolCoreSize,omitempty" tf:"pool_core_size,omitempty"` -// The number of core streaming threads. Value is between 1 and 1024. -// +kubebuilder:validation:Optional -PoolCoreSize *float64 `json:"poolCoreSize,omitempty" tf:"pool_core_size,omitempty"` + // The maximum allowed number of core streaming threads. Value is between 1 and 1024. + // +kubebuilder:validation:Optional + PoolMaxSize *float64 `json:"poolMaxSize,omitempty" tf:"pool_max_size,omitempty"` -// The maximum allowed number of core streaming threads. Value is between 1 and 1024. -// +kubebuilder:validation:Optional -PoolMaxSize *float64 `json:"poolMaxSize,omitempty" tf:"pool_max_size,omitempty"` + // The capacity of the core streaming thread pool queue. Value is positive. + // +kubebuilder:validation:Optional + PoolQueueCapacity *float64 `json:"poolQueueCapacity,omitempty" tf:"pool_queue_capacity,omitempty"` -// The capacity of the core streaming thread pool queue. Value is positive. -// +kubebuilder:validation:Optional -PoolQueueCapacity *float64 `json:"poolQueueCapacity,omitempty" tf:"pool_queue_capacity,omitempty"` + // The Tomcat server connection timeout for write operations in seconds. Value is between 5 and 600. + // +kubebuilder:validation:Optional + UploadTimeout *float64 `json:"uploadTimeout,omitempty" tf:"upload_timeout,omitempty"` -// The Tomcat server connection timeout for write operations in seconds. Value is between 5 and 600. -// +kubebuilder:validation:Optional -UploadTimeout *float64 `json:"uploadTimeout,omitempty" tf:"upload_timeout,omitempty"` + // Initial JVM heap size for PXF daemon. Value is between 64 and 16384. + // +kubebuilder:validation:Optional + Xms *float64 `json:"xms,omitempty" tf:"xms,omitempty"` -// Maximum JVM heap size for PXF daemon. Value is between 64 and 16384. -// +kubebuilder:validation:Optional -Xms *float64 `json:"xms,omitempty" tf:"xms,omitempty"` - -// Initial JVM heap size for PXF daemon. Value is between 64 and 16384. -// +kubebuilder:validation:Optional -Xmx *float64 `json:"xmx,omitempty" tf:"xmx,omitempty"` + // Maximum JVM heap size for PXF daemon. Value is between 64 and 16384. + // +kubebuilder:validation:Optional + Xmx *float64 `json:"xmx,omitempty" tf:"xmx,omitempty"` } - type QueryKillerIdleInTransactionInitParameters struct { + // Flag that indicates whether script is enabled. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate.
+ IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerIdleInTransactionObservation struct { + // Flag that indicates whether script is enabled. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerIdleInTransactionParameters struct { + // Flag that indicates whether script is enabled. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -// +kubebuilder:validation:Optional -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + // +kubebuilder:validation:Optional + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -// +kubebuilder:validation:Optional -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -// +kubebuilder:validation:Optional -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + // +kubebuilder:validation:Optional + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerIdleInitParameters struct { + // Flag that indicates whether script is enabled. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerIdleObservation struct { + // Flag that indicates whether script is enabled. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. 
-Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerIdleParameters struct { + // Flag that indicates whether script is enabled. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -// +kubebuilder:validation:Optional -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + // +kubebuilder:validation:Optional + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -// +kubebuilder:validation:Optional -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -// +kubebuilder:validation:Optional -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + // +kubebuilder:validation:Optional + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerLongRunningInitParameters struct { + // Flag that indicates whether script is enabled. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerLongRunningObservation struct { + // Flag that indicates whether script is enabled. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type QueryKillerLongRunningParameters struct { + // Flag that indicates whether script is enabled. 
+ // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` -// Flag that indicates whether script is enabled. -// +kubebuilder:validation:Optional -Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + // List of users to ignore when considering queries to terminate. + // +kubebuilder:validation:Optional + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` -// List of users to ignore when considering queries to terminate. -// +kubebuilder:validation:Optional -IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` - -// Maximum duration for this type of queries (in seconds). -// +kubebuilder:validation:Optional -MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` + // Maximum duration for this type of queries (in seconds). + // +kubebuilder:validation:Optional + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` } - type SegmentHostsInitParameters struct { - } - type SegmentHostsObservation struct { - -// (Computed) The fully qualified domain name of the host. -Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + // (Computed) The fully qualified domain name of the host. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` } - type SegmentHostsParameters struct { - } - type SegmentSubclusterInitParameters struct { - -// Resources allocated to hosts for segment subcluster of the Greenplum cluster. The structure is documented below. -Resources []SegmentSubclusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts for segment subcluster of the Greenplum cluster. The structure is documented below. + Resources []SegmentSubclusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` } - type SegmentSubclusterObservation struct { - -// Resources allocated to hosts for segment subcluster of the Greenplum cluster. The structure is documented below. -Resources []SegmentSubclusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // Resources allocated to hosts for segment subcluster of the Greenplum cluster. The structure is documented below. + Resources []SegmentSubclusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` } - type SegmentSubclusterParameters struct { - -// Resources allocated to hosts for segment subcluster of the Greenplum cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Resources []SegmentSubclusterResourcesParameters `json:"resources" tf:"resources,omitempty"` + // Resources allocated to hosts for segment subcluster of the Greenplum cluster. The structure is documented below. 
+ // +kubebuilder:validation:Optional + Resources []SegmentSubclusterResourcesParameters `json:"resources" tf:"resources,omitempty"` } - type SegmentSubclusterResourcesInitParameters struct { + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type SegmentSubclusterResourcesObservation struct { + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` - -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type SegmentSubclusterResourcesParameters struct { + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` - -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` } // GreenplumClusterSpec defines the desired state of GreenplumCluster type GreenplumClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider GreenplumClusterParameters `json:"forProvider"` + ForProvider GreenplumClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -1137,20 +1020,19 @@ type GreenplumClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider GreenplumClusterInitParameters `json:"initProvider,omitempty"` + InitProvider GreenplumClusterInitParameters `json:"initProvider,omitempty"` } // GreenplumClusterStatus defines the observed state of GreenplumCluster. type GreenplumClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider GreenplumClusterObservation `json:"atProvider,omitempty"` + AtProvider GreenplumClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // GreenplumCluster is the Schema for the GreenplumClusters API. Manages a Greenplum cluster within Yandex.Cloud. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -1160,20 +1042,20 @@ type GreenplumClusterStatus struct { type GreenplumCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.assignPublicIp) || (has(self.initProvider) && has(self.initProvider.assignPublicIp))",message="spec.forProvider.assignPublicIp is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.masterHostCount) || (has(self.initProvider) && has(self.initProvider.masterHostCount))",message="spec.forProvider.masterHostCount is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.masterSubcluster) || (has(self.initProvider) && has(self.initProvider.masterSubcluster))",message="spec.forProvider.masterSubcluster is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.segmentHostCount) || (has(self.initProvider) && has(self.initProvider.segmentHostCount))",message="spec.forProvider.segmentHostCount is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.segmentInHost) || (has(self.initProvider) && has(self.initProvider.segmentInHost))",message="spec.forProvider.segmentInHost is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.segmentSubcluster) || (has(self.initProvider) && has(self.initProvider.segmentSubcluster))",message="spec.forProvider.segmentSubcluster is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userName) || (has(self.initProvider) && has(self.initProvider.userName))",message="spec.forProvider.userName is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.userPasswordSecretRef)",message="spec.forProvider.userPasswordSecretRef is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" - Spec GreenplumClusterSpec `json:"spec"` - Status GreenplumClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.assignPublicIp) || (has(self.initProvider) && has(self.initProvider.assignPublicIp))",message="spec.forProvider.assignPublicIp is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.masterHostCount) || (has(self.initProvider) && has(self.initProvider.masterHostCount))",message="spec.forProvider.masterHostCount is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.masterSubcluster) || (has(self.initProvider) && has(self.initProvider.masterSubcluster))",message="spec.forProvider.masterSubcluster is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.segmentHostCount) || (has(self.initProvider) && has(self.initProvider.segmentHostCount))",message="spec.forProvider.segmentHostCount is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.segmentInHost) || (has(self.initProvider) && has(self.initProvider.segmentInHost))",message="spec.forProvider.segmentInHost is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.segmentSubcluster) || (has(self.initProvider) && has(self.initProvider.segmentSubcluster))",message="spec.forProvider.segmentSubcluster is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userName) || (has(self.initProvider) && has(self.initProvider.userName))",message="spec.forProvider.userName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userPasswordSecretRef)",message="spec.forProvider.userPasswordSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" + Spec GreenplumClusterSpec `json:"spec"` + Status GreenplumClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_groupversion_info.go b/apis/mdb/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..62e7755 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=mdb.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "mdb.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/mdb/v1alpha1/zz_kafkacluster_terraformed.go b/apis/mdb/v1alpha1/zz_kafkacluster_terraformed.go new file mode 100755 index 0000000..14f5b51 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_kafkacluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
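For orientation before the next generated file: the SchemeBuilder/AddToScheme pair above is the only hook other code needs in order to make the mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 types known to a runtime.Scheme. A minimal sketch of that registration, assuming the module path github.com/tagesjump/provider-upjet-yc that appears elsewhere in this diff:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	mdbv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
)

func main() {
	// Build a fresh scheme and register every type the generated
	// SchemeBuilder knows about in this group, the same call the
	// provider's controller manager makes per API group at startup.
	s := runtime.NewScheme()
	if err := mdbv1alpha1.AddToScheme(s); err != nil {
		panic(err)
	}
}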
diff --git a/apis/mdb/v1alpha1/zz_kafkacluster_terraformed.go b/apis/mdb/v1alpha1/zz_kafkacluster_terraformed.go
new file mode 100755
index 0000000..14f5b51
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_kafkacluster_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this KafkaCluster
+func (mg *KafkaCluster) GetTerraformResourceType() string {
+	return "yandex_mdb_kafka_cluster"
+}
+
+// GetConnectionDetailsMapping for this KafkaCluster
+func (tr *KafkaCluster) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"user[*].password": "user[*].passwordSecretRef"}
+}
+
+// GetObservation of this KafkaCluster
+func (tr *KafkaCluster) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this KafkaCluster
+func (tr *KafkaCluster) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this KafkaCluster
+func (tr *KafkaCluster) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this KafkaCluster
+func (tr *KafkaCluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this KafkaCluster
+func (tr *KafkaCluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this KafkaCluster
+func (tr *KafkaCluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this KafkaCluster
+func (tr *KafkaCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this KafkaCluster using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *KafkaCluster) LateInitialize(attrs []byte) (bool, error) {
+	params := &KafkaClusterParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *KafkaCluster) GetTerraformSchemaVersion() int {
+	return 0
+}
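The Note(lsviben) comment in GetMergedParameters is worth unpacking: mergo.WithSliceDeepCopy flips Overwrite on as a side effect, so the config callback turns it back off. A self-contained sketch with hypothetical maps standing in for spec.forProvider and spec.initProvider shows the resulting semantics — values already present in forProvider win, and initProvider only fills the gaps:

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	params := map[string]any{"name": "from-forProvider"}                             // plays spec.forProvider
	initParams := map[string]any{"name": "ignored", "zones": []any{"ru-central1-a"}} // plays spec.initProvider

	// Same call shape as GetMergedParameters: deep-copy slices, but do
	// not overwrite keys that forProvider already sets.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["name"], params["zones"]) // from-forProvider [ru-central1-a]
}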
diff --git a/apis/mdb/v1alpha1/zz_kafkacluster_types.go b/apis/mdb/v1alpha1/zz_kafkacluster_types.go
index 33ad5a2..e1bbd8f 100755
--- a/apis/mdb/v1alpha1/zz_kafkacluster_types.go
+++ b/apis/mdb/v1alpha1/zz_kafkacluster_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,1087 +7,990 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
-
 
 type ConfigAccessInitParameters struct {
-
-// Allow access for DataTransfer
-DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"`
+	// Allow access for DataTransfer
+	DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"`
 }
-
 
 type ConfigAccessObservation struct {
-
-// Allow access for DataTransfer
-DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"`
+	// Allow access for DataTransfer
+	DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"`
 }
-
 
 type ConfigAccessParameters struct {
-
-// Allow access for DataTransfer
-// +kubebuilder:validation:Optional
-DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"`
+	// Allow access for DataTransfer
+	// +kubebuilder:validation:Optional
+	DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"`
 }
-
 
 type ConfigKafkaInitParameters struct {
+	// User-defined settings for the Kafka cluster. The structure is documented below.
+	KafkaConfig []KafkaConfigInitParameters `json:"kafkaConfig,omitempty" tf:"kafka_config,omitempty"`
 
-// User-defined settings for the Kafka cluster. The structure is documented below.
-KafkaConfig []KafkaConfigInitParameters `json:"kafkaConfig,omitempty" tf:"kafka_config,omitempty"`
-
-// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
-Resources []KafkaResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+	Resources []KafkaResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
 }
-
 
 type ConfigKafkaObservation struct {
+	// User-defined settings for the Kafka cluster. The structure is documented below.
+	KafkaConfig []KafkaConfigObservation `json:"kafkaConfig,omitempty" tf:"kafka_config,omitempty"`
 
-// User-defined settings for the Kafka cluster. The structure is documented below.
-KafkaConfig []KafkaConfigObservation `json:"kafkaConfig,omitempty" tf:"kafka_config,omitempty"`
-
-// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
-Resources []KafkaResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+	Resources []KafkaResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
 }
-
 
 type ConfigKafkaParameters struct {
+	// User-defined settings for the Kafka cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	KafkaConfig []KafkaConfigParameters `json:"kafkaConfig,omitempty" tf:"kafka_config,omitempty"`
 
-// User-defined settings for the Kafka cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-KafkaConfig []KafkaConfigParameters `json:"kafkaConfig,omitempty" tf:"kafka_config,omitempty"`
-
-// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Resources []KafkaResourcesParameters `json:"resources" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Resources []KafkaResourcesParameters `json:"resources" tf:"resources,omitempty"`
 }
-
 
 type ConfigZookeeperInitParameters struct {
-
-// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
-Resources []ConfigZookeeperResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+	Resources []ConfigZookeeperResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
 }
-
 
 type ConfigZookeeperObservation struct {
-
-// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
-Resources []ConfigZookeeperResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+	Resources []ConfigZookeeperResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
 }
-
 
 type ConfigZookeeperParameters struct {
-
-// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Resources []ConfigZookeeperResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Resources []ConfigZookeeperResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
 }
-
 
 type ConfigZookeeperResourcesInitParameters struct {
+	// Volume of the storage available to a ZooKeeper host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
 
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+	// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
 
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
 }
-
 
 type ConfigZookeeperResourcesObservation struct {
+	// Volume of the storage available to a ZooKeeper host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
 
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+	// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
 
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
 }
-
 
 type ConfigZookeeperResourcesParameters struct {
+	// Volume of the storage available to a ZooKeeper host, in gigabytes.
+	// +kubebuilder:validation:Optional
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
 
-// Volume of the storage available to a ZooKeeper host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+	// Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+	// +kubebuilder:validation:Optional
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
 
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+	// +kubebuilder:validation:Optional
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
 }
-
 
 type DiskSizeAutoscalingInitParameters struct {
+	// Maximum possible size of disk in bytes.
+	DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`
 
-// Maximum possible size of disk in bytes.
-DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`
-
-// Percent of disk utilization. Disk will autoscale immediately, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled). Must be not less then 'planned_usage_threshold' value.
-EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
+	// Percent of disk utilization. Disk will autoscale immediately, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled). Must be not less then 'planned_usage_threshold' value.
+	EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
 
-// Percent of disk utilization. During maintenance disk will autoscale, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled).
-PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
+	// Percent of disk utilization. During maintenance disk will autoscale, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled).
+	PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
 }
-
 
 type DiskSizeAutoscalingObservation struct {
+	// Maximum possible size of disk in bytes.
+	DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`
 
-// Maximum possible size of disk in bytes.
-DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`
+	// Percent of disk utilization. Disk will autoscale immediately, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled). Must be not less then 'planned_usage_threshold' value.
+	EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
 
-// Percent of disk utilization. Disk will autoscale immediately, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled). Must be not less then 'planned_usage_threshold' value.
-EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
-
-// Percent of disk utilization. During maintenance disk will autoscale, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled).
-PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
+	// Percent of disk utilization. During maintenance disk will autoscale, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled).
+	PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
 }
-
 
 type DiskSizeAutoscalingParameters struct {
+	// Maximum possible size of disk in bytes.
+	// +kubebuilder:validation:Optional
+	DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"`
 
-// Maximum possible size of disk in bytes.
-// +kubebuilder:validation:Optional
-DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"`
-
-// Percent of disk utilization. Disk will autoscale immediately, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled). Must be not less then 'planned_usage_threshold' value.
-// +kubebuilder:validation:Optional
-EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
+	// Percent of disk utilization. Disk will autoscale immediately, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled). Must be not less then 'planned_usage_threshold' value.
+	// +kubebuilder:validation:Optional
+	EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
 
-// Percent of disk utilization. During maintenance disk will autoscale, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled).
-// +kubebuilder:validation:Optional
-PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
+	// Percent of disk utilization. During maintenance disk will autoscale, if this threshold reached. Value is between 0 and 100. Default value is 0 (autoscaling disabled).
+	// +kubebuilder:validation:Optional
+	PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
 }
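The DiskSizeAutoscaling comments above encode two numeric constraints: both thresholds are percentages in [0, 100] with 0 meaning disabled, and the emergency threshold must not be below the planned one. A hypothetical checker (not part of the provider — the real enforcement happens server-side in Yandex Cloud) makes that relationship concrete:

package main

import "fmt"

func checkThresholds(planned, emergency float64) error {
	for _, v := range []float64{planned, emergency} {
		if v < 0 || v > 100 {
			return fmt.Errorf("threshold %v outside [0, 100]", v)
		}
	}
	// 0 disables autoscaling, so only compare when emergency is set.
	if emergency != 0 && emergency < planned {
		return fmt.Errorf("emergency threshold %v must not be less than planned threshold %v", emergency, planned)
	}
	return nil
}

func main() {
	fmt.Println(checkThresholds(70, 90)) // <nil>
	fmt.Println(checkThresholds(90, 70)) // error: emergency below planned
}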
-
 
 type KafkaClusterConfigInitParameters struct {
+	// Access policy to the Kafka cluster. The structure is documented below.
+	Access []ConfigAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"`
 
-// Access policy to the Kafka cluster. The structure is documented below.
-Access []ConfigAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"`
-
-// Determines whether each broker will be assigned a public IP address. The default is false.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// Determines whether each broker will be assigned a public IP address. The default is false.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
 
-// Count of brokers per availability zone. The default is 1.
-BrokersCount *float64 `json:"brokersCount,omitempty" tf:"brokers_count,omitempty"`
+	// Count of brokers per availability zone. The default is 1.
+	BrokersCount *float64 `json:"brokersCount,omitempty" tf:"brokers_count,omitempty"`
 
-// Disk autoscaling settings of the Kafka cluster. The structure is documented below.
-DiskSizeAutoscaling []DiskSizeAutoscalingInitParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"`
+	// Disk autoscaling settings of the Kafka cluster. The structure is documented below.
+	DiskSizeAutoscaling []DiskSizeAutoscalingInitParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"`
 
-// Configuration of the Kafka subcluster. The structure is documented below.
-Kafka []ConfigKafkaInitParameters `json:"kafka,omitempty" tf:"kafka,omitempty"`
+	// Configuration of the Kafka subcluster. The structure is documented below.
+	Kafka []ConfigKafkaInitParameters `json:"kafka,omitempty" tf:"kafka,omitempty"`
 
-// Enables managed schema registry on cluster. The default is false.
-SchemaRegistry *bool `json:"schemaRegistry,omitempty" tf:"schema_registry,omitempty"`
+	// Enables managed schema registry on cluster. The default is false.
+	SchemaRegistry *bool `json:"schemaRegistry,omitempty" tf:"schema_registry,omitempty"`
 
-UnmanagedTopics *bool `json:"unmanagedTopics,omitempty" tf:"unmanaged_topics,omitempty"`
+	UnmanagedTopics *bool `json:"unmanagedTopics,omitempty" tf:"unmanaged_topics,omitempty"`
 
-// Version of the Kafka server software.
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// Version of the Kafka server software.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
 
-// List of availability zones.
-Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"`
+	// List of availability zones.
+	Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"`
 
-// Configuration of the ZooKeeper subcluster. The structure is documented below.
-Zookeeper []ConfigZookeeperInitParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"`
+	// Configuration of the ZooKeeper subcluster. The structure is documented below.
+	Zookeeper []ConfigZookeeperInitParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"`
 }
-
 
 type KafkaClusterConfigObservation struct {
+	// Access policy to the Kafka cluster. The structure is documented below.
+	Access []ConfigAccessObservation `json:"access,omitempty" tf:"access,omitempty"`
 
-// Access policy to the Kafka cluster. The structure is documented below.
-Access []ConfigAccessObservation `json:"access,omitempty" tf:"access,omitempty"`
+	// Determines whether each broker will be assigned a public IP address. The default is false.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
 
-// Determines whether each broker will be assigned a public IP address. The default is false.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// Count of brokers per availability zone. The default is 1.
+	BrokersCount *float64 `json:"brokersCount,omitempty" tf:"brokers_count,omitempty"`
 
-// Count of brokers per availability zone. The default is 1.
-BrokersCount *float64 `json:"brokersCount,omitempty" tf:"brokers_count,omitempty"`
+	// Disk autoscaling settings of the Kafka cluster. The structure is documented below.
+	DiskSizeAutoscaling []DiskSizeAutoscalingObservation `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"`
 
-// Disk autoscaling settings of the Kafka cluster. The structure is documented below.
-DiskSizeAutoscaling []DiskSizeAutoscalingObservation `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"`
+	// Configuration of the Kafka subcluster. The structure is documented below.
+	Kafka []ConfigKafkaObservation `json:"kafka,omitempty" tf:"kafka,omitempty"`
 
-// Configuration of the Kafka subcluster. The structure is documented below.
-Kafka []ConfigKafkaObservation `json:"kafka,omitempty" tf:"kafka,omitempty"`
+	// Enables managed schema registry on cluster. The default is false.
+	SchemaRegistry *bool `json:"schemaRegistry,omitempty" tf:"schema_registry,omitempty"`
 
-// Enables managed schema registry on cluster. The default is false.
-SchemaRegistry *bool `json:"schemaRegistry,omitempty" tf:"schema_registry,omitempty"`
+	UnmanagedTopics *bool `json:"unmanagedTopics,omitempty" tf:"unmanaged_topics,omitempty"`
 
-UnmanagedTopics *bool `json:"unmanagedTopics,omitempty" tf:"unmanaged_topics,omitempty"`
+	// Version of the Kafka server software.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
 
-// Version of the Kafka server software.
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// List of availability zones.
+	Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"`
 
-// List of availability zones.
-Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"`
-
-// Configuration of the ZooKeeper subcluster. The structure is documented below.
-Zookeeper []ConfigZookeeperObservation `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"`
+	// Configuration of the ZooKeeper subcluster. The structure is documented below.
+	Zookeeper []ConfigZookeeperObservation `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"`
 }
-
 
 type KafkaClusterConfigParameters struct {
+	// Access policy to the Kafka cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Access []ConfigAccessParameters `json:"access,omitempty" tf:"access,omitempty"`
 
-// Access policy to the Kafka cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Access []ConfigAccessParameters `json:"access,omitempty" tf:"access,omitempty"`
-
-// Determines whether each broker will be assigned a public IP address. The default is false.
-// +kubebuilder:validation:Optional
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// Determines whether each broker will be assigned a public IP address. The default is false.
+	// +kubebuilder:validation:Optional
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
 
-// Count of brokers per availability zone. The default is 1.
-// +kubebuilder:validation:Optional
-BrokersCount *float64 `json:"brokersCount,omitempty" tf:"brokers_count,omitempty"`
+	// Count of brokers per availability zone. The default is 1.
+	// +kubebuilder:validation:Optional
+	BrokersCount *float64 `json:"brokersCount,omitempty" tf:"brokers_count,omitempty"`
 
-// Disk autoscaling settings of the Kafka cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-DiskSizeAutoscaling []DiskSizeAutoscalingParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"`
+	// Disk autoscaling settings of the Kafka cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	DiskSizeAutoscaling []DiskSizeAutoscalingParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"`
 
-// Configuration of the Kafka subcluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Kafka []ConfigKafkaParameters `json:"kafka" tf:"kafka,omitempty"`
+	// Configuration of the Kafka subcluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Kafka []ConfigKafkaParameters `json:"kafka" tf:"kafka,omitempty"`
 
-// Enables managed schema registry on cluster. The default is false.
-// +kubebuilder:validation:Optional
-SchemaRegistry *bool `json:"schemaRegistry,omitempty" tf:"schema_registry,omitempty"`
+	// Enables managed schema registry on cluster. The default is false.
+	// +kubebuilder:validation:Optional
+	SchemaRegistry *bool `json:"schemaRegistry,omitempty" tf:"schema_registry,omitempty"`
 
-// +kubebuilder:validation:Optional
-UnmanagedTopics *bool `json:"unmanagedTopics,omitempty" tf:"unmanaged_topics,omitempty"`
+	// +kubebuilder:validation:Optional
+	UnmanagedTopics *bool `json:"unmanagedTopics,omitempty" tf:"unmanaged_topics,omitempty"`
 
-// Version of the Kafka server software.
-// +kubebuilder:validation:Optional
-Version *string `json:"version" tf:"version,omitempty"`
+	// Version of the Kafka server software.
+	// +kubebuilder:validation:Optional
+	Version *string `json:"version" tf:"version,omitempty"`
 
-// List of availability zones.
-// +kubebuilder:validation:Optional
-Zones []*string `json:"zones" tf:"zones,omitempty"`
+	// List of availability zones.
+	// +kubebuilder:validation:Optional
+	Zones []*string `json:"zones" tf:"zones,omitempty"`
 
-// Configuration of the ZooKeeper subcluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Zookeeper []ConfigZookeeperParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"`
+	// Configuration of the ZooKeeper subcluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Zookeeper []ConfigZookeeperParameters `json:"zookeeper,omitempty" tf:"zookeeper,omitempty"`
 }
-
 
 type KafkaClusterHostInitParameters struct {
-
 }
-
 
 type KafkaClusterHostObservation struct {
+	// The flag that defines whether a public IP address is assigned to the node.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
 
-// The flag that defines whether a public IP address is assigned to the node.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// Health of the host.
+	Health *string `json:"health,omitempty" tf:"health,omitempty"`
 
-// Health of the host.
-Health *string `json:"health,omitempty" tf:"health,omitempty"`
+	// The fully qualified domain name of the host.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// The fully qualified domain name of the host.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Role of the host in the cluster.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
 
-// Role of the host in the cluster.
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The ID of the subnet, to which the host belongs.
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
 
-// The ID of the subnet, to which the host belongs.
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-
-// The availability zone where the Kafka host was created.
-ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+	// The availability zone where the Kafka host was created.
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
 }
-
 
 type KafkaClusterHostParameters struct {
-
 }
-
 
 type KafkaClusterInitParameters struct {
+	// Configuration of the Kafka cluster. The structure is documented below.
+	Config []KafkaClusterConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"`
 
-// Configuration of the Kafka cluster. The structure is documented below.
-Config []KafkaClusterConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"`
-
-// Inhibits deletion of the cluster. Can be either true or false.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
 
-// Description of the Kafka cluster.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Description of the Kafka cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+	// Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
 
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
 
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
 
-// A list of IDs of the host groups to place VMs of the cluster on.
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+	// A list of IDs of the host groups to place VMs of the cluster on.
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
 
-// A set of key/value label pairs to assign to the Kafka cluster.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// A set of key/value label pairs to assign to the Kafka cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// Maintenance policy of the Kafka cluster. The structure is documented below.
-MaintenanceWindow []KafkaClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+	// Maintenance policy of the Kafka cluster. The structure is documented below.
+	MaintenanceWindow []KafkaClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
 
-// Name of the Kafka cluster. Provided by the client when the cluster is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Name of the Kafka cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// ID of the network, to which the Kafka cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+	// ID of the network, to which the Kafka cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
 
-// Reference to a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
 
-// Selector for a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
 
-// Security group ids, to which the Kafka cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+	// Security group ids, to which the Kafka cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
 
-// References to SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
 
-// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
 
-// IDs of the subnets, to which the Kafka cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+	// IDs of the subnets, to which the Kafka cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
 
-// References to Subnet in vpc to populate subnetIds.
-// +kubebuilder:validation:Optional
-SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
+	// References to Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
 
-// Selector for a list of Subnet in vpc to populate subnetIds.
-// +kubebuilder:validation:Optional
-SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
+	// Selector for a list of Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
 
-// (Deprecated) To manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic.
-Topic []TopicInitParameters `json:"topic,omitempty" tf:"topic,omitempty"`
+	// (Deprecated) To manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic.
+	Topic []TopicInitParameters `json:"topic,omitempty" tf:"topic,omitempty"`
 
-// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_kafka_user.
-User []KafkaClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
+	// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_kafka_user.
+	User []KafkaClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 
 type KafkaClusterMaintenanceWindowInitParameters struct {
+	// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
 
-// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
+	// Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
 
-// Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 
 type KafkaClusterMaintenanceWindowObservation struct {
+	// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
 
-// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
+	// Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
 
-// Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 
 type KafkaClusterMaintenanceWindowParameters struct {
+	// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
+	// +kubebuilder:validation:Optional
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
 
-// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-// +kubebuilder:validation:Optional
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
+	// Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
+	// +kubebuilder:validation:Optional
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
 
-// Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
-// +kubebuilder:validation:Optional
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
 }
-
 
 type KafkaClusterObservation struct {
+	// Configuration of the Kafka cluster. The structure is documented below.
+	Config []KafkaClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"`
 
-// Configuration of the Kafka cluster. The structure is documented below.
-Config []KafkaClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"`
-
-// Timestamp of cluster creation.
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+	// Timestamp of cluster creation.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
 
-// Inhibits deletion of the cluster. Can be either true or false.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
 
-// Description of the Kafka cluster.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Description of the Kafka cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
 
-// Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+	// Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
 
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
 
-// Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
-Health *string `json:"health,omitempty" tf:"health,omitempty"`
+	// Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
+	Health *string `json:"health,omitempty" tf:"health,omitempty"`
 
-// A host of the Kafka cluster. The structure is documented below.
-Host []KafkaClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"`
+	// A host of the Kafka cluster. The structure is documented below.
+	Host []KafkaClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"`
 
-// A list of IDs of the host groups to place VMs of the cluster on.
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+	// A list of IDs of the host groups to place VMs of the cluster on.
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
 
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
 
-// A set of key/value label pairs to assign to the Kafka cluster.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// A set of key/value label pairs to assign to the Kafka cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
 
-// Maintenance policy of the Kafka cluster. The structure is documented below.
-MaintenanceWindow []KafkaClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+	// Maintenance policy of the Kafka cluster. The structure is documented below.
+	MaintenanceWindow []KafkaClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
 
-// Name of the Kafka cluster. Provided by the client when the cluster is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Name of the Kafka cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// ID of the network, to which the Kafka cluster belongs.
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+	// ID of the network, to which the Kafka cluster belongs.
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
 
-// Security group ids, to which the Kafka cluster belongs.
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+	// Security group ids, to which the Kafka cluster belongs.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
 
-// Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
+	// Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
 
-// IDs of the subnets, to which the Kafka cluster belongs.
-SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+	// IDs of the subnets, to which the Kafka cluster belongs.
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
 
-// (Deprecated) To manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic.
-Topic []TopicObservation `json:"topic,omitempty" tf:"topic,omitempty"`
+	// (Deprecated) To manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic.
+	Topic []TopicObservation `json:"topic,omitempty" tf:"topic,omitempty"`
 
-// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_kafka_user.
-User []KafkaClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"`
+	// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_kafka_user.
+	User []KafkaClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 
 type KafkaClusterParameters struct {
-
-// Configuration of the Kafka cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Config []KafkaClusterConfigParameters `json:"config,omitempty" tf:"config,omitempty"`
-
-// Inhibits deletion of the cluster. Can be either true or false.
-// +kubebuilder:validation:Optional
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
-
-// Description of the Kafka cluster.
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
-
-// Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
-// +kubebuilder:validation:Optional
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
-
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-
-// A list of IDs of the host groups to place VMs of the cluster on.
-// +kubebuilder:validation:Optional
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
-
-// A set of key/value label pairs to assign to the Kafka cluster.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-
-// Maintenance policy of the Kafka cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-MaintenanceWindow []KafkaClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
-
-// Name of the Kafka cluster. Provided by the client when the cluster is created.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// ID of the network, to which the Kafka cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
-// +kubebuilder:validation:Optional
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
-
-// Reference to a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
-
-// Selector for a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
-
-// Security group ids, to which the Kafka cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +kubebuilder:validation:Optional
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-
-// References to SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
-
-// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-
-// IDs of the subnets, to which the Kafka cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-// +kubebuilder:validation:Optional
-SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
-
-// References to Subnet in vpc to populate subnetIds.
-// +kubebuilder:validation:Optional
-SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
-
-// Selector for a list of Subnet in vpc to populate subnetIds.
-// +kubebuilder:validation:Optional
-SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
-
-// (Deprecated) To manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic.
-// +kubebuilder:validation:Optional
-Topic []TopicParameters `json:"topic,omitempty" tf:"topic,omitempty"`
-
-// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_kafka_user.
-// +kubebuilder:validation:Optional
-User []KafkaClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
+	// Configuration of the Kafka cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Config []KafkaClusterConfigParameters `json:"config,omitempty" tf:"config,omitempty"`
+
+	// Inhibits deletion of the cluster. Can be either true or false.
+	// +kubebuilder:validation:Optional
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Description of the Kafka cluster.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
+	// +kubebuilder:validation:Optional
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A list of IDs of the host groups to place VMs of the cluster on.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+
+	// A set of key/value label pairs to assign to the Kafka cluster.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Maintenance policy of the Kafka cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	MaintenanceWindow []KafkaClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+
+	// Name of the Kafka cluster. Provided by the client when the cluster is created.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the network, to which the Kafka cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	// +kubebuilder:validation:Optional
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+	// Security group ids, to which the Kafka cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+	// IDs of the subnets, to which the Kafka cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+
+	// References to Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of Subnet in vpc to populate subnetIds.
+	// +kubebuilder:validation:Optional
+	SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"`
+
+	// (Deprecated) To manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic.
+	// +kubebuilder:validation:Optional
+	Topic []TopicParameters `json:"topic,omitempty" tf:"topic,omitempty"`
+
+	// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_kafka_user.
+	// +kubebuilder:validation:Optional
+	User []KafkaClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
 }
-
 
 type KafkaClusterUserInitParameters struct {
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The password of the user.
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
 
-// The password of the user.
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []UserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []UserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 
 type KafkaClusterUserObservation struct {
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []UserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []UserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 
 type KafkaClusterUserParameters struct {
+	// The name of the user.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
-
-// The password of the user.
-// +kubebuilder:validation:Optional
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+	// The password of the user.
+	// +kubebuilder:validation:Optional
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
 
-// Set of permissions granted to the user. The structure is documented below.
-// +kubebuilder:validation:Optional
-Permission []UserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Permission []UserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
 }
For more information, see the official documentation and the Kafka documentation. -CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + LogFlushIntervalMessages *string `json:"logFlushIntervalMessages,omitempty" tf:"log_flush_interval_messages,omitempty"` -DefaultReplicationFactor *string `json:"defaultReplicationFactor,omitempty" tf:"default_replication_factor,omitempty"` + LogFlushIntervalMs *string `json:"logFlushIntervalMs,omitempty" tf:"log_flush_interval_ms,omitempty"` -LogFlushIntervalMessages *string `json:"logFlushIntervalMessages,omitempty" tf:"log_flush_interval_messages,omitempty"` + LogFlushSchedulerIntervalMs *string `json:"logFlushSchedulerIntervalMs,omitempty" tf:"log_flush_scheduler_interval_ms,omitempty"` -LogFlushIntervalMs *string `json:"logFlushIntervalMs,omitempty" tf:"log_flush_interval_ms,omitempty"` + LogPreallocate *bool `json:"logPreallocate,omitempty" tf:"log_preallocate,omitempty"` -LogFlushSchedulerIntervalMs *string `json:"logFlushSchedulerIntervalMs,omitempty" tf:"log_flush_scheduler_interval_ms,omitempty"` + LogRetentionBytes *string `json:"logRetentionBytes,omitempty" tf:"log_retention_bytes,omitempty"` -LogPreallocate *bool `json:"logPreallocate,omitempty" tf:"log_preallocate,omitempty"` + LogRetentionHours *string `json:"logRetentionHours,omitempty" tf:"log_retention_hours,omitempty"` -LogRetentionBytes *string `json:"logRetentionBytes,omitempty" tf:"log_retention_bytes,omitempty"` + LogRetentionMinutes *string `json:"logRetentionMinutes,omitempty" tf:"log_retention_minutes,omitempty"` -LogRetentionHours *string `json:"logRetentionHours,omitempty" tf:"log_retention_hours,omitempty"` + LogRetentionMs *string `json:"logRetentionMs,omitempty" tf:"log_retention_ms,omitempty"` -LogRetentionMinutes *string `json:"logRetentionMinutes,omitempty" tf:"log_retention_minutes,omitempty"` + LogSegmentBytes *string `json:"logSegmentBytes,omitempty" tf:"log_segment_bytes,omitempty"` -LogRetentionMs *string `json:"logRetentionMs,omitempty" tf:"log_retention_ms,omitempty"` + MessageMaxBytes *string `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` -LogSegmentBytes *string `json:"logSegmentBytes,omitempty" tf:"log_segment_bytes,omitempty"` + NumPartitions *string `json:"numPartitions,omitempty" tf:"num_partitions,omitempty"` -MessageMaxBytes *string `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` + OffsetsRetentionMinutes *string `json:"offsetsRetentionMinutes,omitempty" tf:"offsets_retention_minutes,omitempty"` -NumPartitions *string `json:"numPartitions,omitempty" tf:"num_partitions,omitempty"` + ReplicaFetchMaxBytes *string `json:"replicaFetchMaxBytes,omitempty" tf:"replica_fetch_max_bytes,omitempty"` -OffsetsRetentionMinutes *string `json:"offsetsRetentionMinutes,omitempty" tf:"offsets_retention_minutes,omitempty"` + // +listType=set + SSLCipherSuites []*string `json:"sslCipherSuites,omitempty" tf:"ssl_cipher_suites,omitempty"` -ReplicaFetchMaxBytes *string `json:"replicaFetchMaxBytes,omitempty" tf:"replica_fetch_max_bytes,omitempty"` + // +listType=set + SaslEnabledMechanisms []*string `json:"saslEnabledMechanisms,omitempty" tf:"sasl_enabled_mechanisms,omitempty"` -// +listType=set -SSLCipherSuites []*string `json:"sslCipherSuites,omitempty" tf:"ssl_cipher_suites,omitempty"` + SocketReceiveBufferBytes *string `json:"socketReceiveBufferBytes,omitempty" tf:"socket_receive_buffer_bytes,omitempty"` -// +listType=set -SaslEnabledMechanisms []*string `json:"saslEnabledMechanisms,omitempty" 
tf:"sasl_enabled_mechanisms,omitempty"` - -SocketReceiveBufferBytes *string `json:"socketReceiveBufferBytes,omitempty" tf:"socket_receive_buffer_bytes,omitempty"` - -SocketSendBufferBytes *string `json:"socketSendBufferBytes,omitempty" tf:"socket_send_buffer_bytes,omitempty"` + SocketSendBufferBytes *string `json:"socketSendBufferBytes,omitempty" tf:"socket_send_buffer_bytes,omitempty"` } - type KafkaConfigObservation struct { + AutoCreateTopicsEnable *bool `json:"autoCreateTopicsEnable,omitempty" tf:"auto_create_topics_enable,omitempty"` + // Kafka topic settings. For more information, see the official documentation and the Kafka documentation. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` -AutoCreateTopicsEnable *bool `json:"autoCreateTopicsEnable,omitempty" tf:"auto_create_topics_enable,omitempty"` + DefaultReplicationFactor *string `json:"defaultReplicationFactor,omitempty" tf:"default_replication_factor,omitempty"` -// Kafka topic settings. For more information, see the official documentation and the Kafka documentation. -CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + LogFlushIntervalMessages *string `json:"logFlushIntervalMessages,omitempty" tf:"log_flush_interval_messages,omitempty"` -DefaultReplicationFactor *string `json:"defaultReplicationFactor,omitempty" tf:"default_replication_factor,omitempty"` + LogFlushIntervalMs *string `json:"logFlushIntervalMs,omitempty" tf:"log_flush_interval_ms,omitempty"` -LogFlushIntervalMessages *string `json:"logFlushIntervalMessages,omitempty" tf:"log_flush_interval_messages,omitempty"` + LogFlushSchedulerIntervalMs *string `json:"logFlushSchedulerIntervalMs,omitempty" tf:"log_flush_scheduler_interval_ms,omitempty"` -LogFlushIntervalMs *string `json:"logFlushIntervalMs,omitempty" tf:"log_flush_interval_ms,omitempty"` + LogPreallocate *bool `json:"logPreallocate,omitempty" tf:"log_preallocate,omitempty"` -LogFlushSchedulerIntervalMs *string `json:"logFlushSchedulerIntervalMs,omitempty" tf:"log_flush_scheduler_interval_ms,omitempty"` + LogRetentionBytes *string `json:"logRetentionBytes,omitempty" tf:"log_retention_bytes,omitempty"` -LogPreallocate *bool `json:"logPreallocate,omitempty" tf:"log_preallocate,omitempty"` + LogRetentionHours *string `json:"logRetentionHours,omitempty" tf:"log_retention_hours,omitempty"` -LogRetentionBytes *string `json:"logRetentionBytes,omitempty" tf:"log_retention_bytes,omitempty"` + LogRetentionMinutes *string `json:"logRetentionMinutes,omitempty" tf:"log_retention_minutes,omitempty"` -LogRetentionHours *string `json:"logRetentionHours,omitempty" tf:"log_retention_hours,omitempty"` + LogRetentionMs *string `json:"logRetentionMs,omitempty" tf:"log_retention_ms,omitempty"` -LogRetentionMinutes *string `json:"logRetentionMinutes,omitempty" tf:"log_retention_minutes,omitempty"` + LogSegmentBytes *string `json:"logSegmentBytes,omitempty" tf:"log_segment_bytes,omitempty"` -LogRetentionMs *string `json:"logRetentionMs,omitempty" tf:"log_retention_ms,omitempty"` + MessageMaxBytes *string `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` -LogSegmentBytes *string `json:"logSegmentBytes,omitempty" tf:"log_segment_bytes,omitempty"` + NumPartitions *string `json:"numPartitions,omitempty" tf:"num_partitions,omitempty"` -MessageMaxBytes *string `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` + OffsetsRetentionMinutes *string `json:"offsetsRetentionMinutes,omitempty" 
tf:"offsets_retention_minutes,omitempty"` -NumPartitions *string `json:"numPartitions,omitempty" tf:"num_partitions,omitempty"` + ReplicaFetchMaxBytes *string `json:"replicaFetchMaxBytes,omitempty" tf:"replica_fetch_max_bytes,omitempty"` -OffsetsRetentionMinutes *string `json:"offsetsRetentionMinutes,omitempty" tf:"offsets_retention_minutes,omitempty"` + // +listType=set + SSLCipherSuites []*string `json:"sslCipherSuites,omitempty" tf:"ssl_cipher_suites,omitempty"` -ReplicaFetchMaxBytes *string `json:"replicaFetchMaxBytes,omitempty" tf:"replica_fetch_max_bytes,omitempty"` + // +listType=set + SaslEnabledMechanisms []*string `json:"saslEnabledMechanisms,omitempty" tf:"sasl_enabled_mechanisms,omitempty"` -// +listType=set -SSLCipherSuites []*string `json:"sslCipherSuites,omitempty" tf:"ssl_cipher_suites,omitempty"` + SocketReceiveBufferBytes *string `json:"socketReceiveBufferBytes,omitempty" tf:"socket_receive_buffer_bytes,omitempty"` -// +listType=set -SaslEnabledMechanisms []*string `json:"saslEnabledMechanisms,omitempty" tf:"sasl_enabled_mechanisms,omitempty"` - -SocketReceiveBufferBytes *string `json:"socketReceiveBufferBytes,omitempty" tf:"socket_receive_buffer_bytes,omitempty"` - -SocketSendBufferBytes *string `json:"socketSendBufferBytes,omitempty" tf:"socket_send_buffer_bytes,omitempty"` + SocketSendBufferBytes *string `json:"socketSendBufferBytes,omitempty" tf:"socket_send_buffer_bytes,omitempty"` } - type KafkaConfigParameters struct { + // +kubebuilder:validation:Optional + AutoCreateTopicsEnable *bool `json:"autoCreateTopicsEnable,omitempty" tf:"auto_create_topics_enable,omitempty"` -// +kubebuilder:validation:Optional -AutoCreateTopicsEnable *bool `json:"autoCreateTopicsEnable,omitempty" tf:"auto_create_topics_enable,omitempty"` + // Kafka topic settings. For more information, see the official documentation and the Kafka documentation. + // +kubebuilder:validation:Optional + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` -// Kafka topic settings. For more information, see the official documentation and the Kafka documentation. 
-// +kubebuilder:validation:Optional -CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + // +kubebuilder:validation:Optional + DefaultReplicationFactor *string `json:"defaultReplicationFactor,omitempty" tf:"default_replication_factor,omitempty"` -// +kubebuilder:validation:Optional -DefaultReplicationFactor *string `json:"defaultReplicationFactor,omitempty" tf:"default_replication_factor,omitempty"` + // +kubebuilder:validation:Optional + LogFlushIntervalMessages *string `json:"logFlushIntervalMessages,omitempty" tf:"log_flush_interval_messages,omitempty"` -// +kubebuilder:validation:Optional -LogFlushIntervalMessages *string `json:"logFlushIntervalMessages,omitempty" tf:"log_flush_interval_messages,omitempty"` + // +kubebuilder:validation:Optional + LogFlushIntervalMs *string `json:"logFlushIntervalMs,omitempty" tf:"log_flush_interval_ms,omitempty"` -// +kubebuilder:validation:Optional -LogFlushIntervalMs *string `json:"logFlushIntervalMs,omitempty" tf:"log_flush_interval_ms,omitempty"` + // +kubebuilder:validation:Optional + LogFlushSchedulerIntervalMs *string `json:"logFlushSchedulerIntervalMs,omitempty" tf:"log_flush_scheduler_interval_ms,omitempty"` -// +kubebuilder:validation:Optional -LogFlushSchedulerIntervalMs *string `json:"logFlushSchedulerIntervalMs,omitempty" tf:"log_flush_scheduler_interval_ms,omitempty"` + // +kubebuilder:validation:Optional + LogPreallocate *bool `json:"logPreallocate,omitempty" tf:"log_preallocate,omitempty"` -// +kubebuilder:validation:Optional -LogPreallocate *bool `json:"logPreallocate,omitempty" tf:"log_preallocate,omitempty"` + // +kubebuilder:validation:Optional + LogRetentionBytes *string `json:"logRetentionBytes,omitempty" tf:"log_retention_bytes,omitempty"` -// +kubebuilder:validation:Optional -LogRetentionBytes *string `json:"logRetentionBytes,omitempty" tf:"log_retention_bytes,omitempty"` + // +kubebuilder:validation:Optional + LogRetentionHours *string `json:"logRetentionHours,omitempty" tf:"log_retention_hours,omitempty"` -// +kubebuilder:validation:Optional -LogRetentionHours *string `json:"logRetentionHours,omitempty" tf:"log_retention_hours,omitempty"` + // +kubebuilder:validation:Optional + LogRetentionMinutes *string `json:"logRetentionMinutes,omitempty" tf:"log_retention_minutes,omitempty"` -// +kubebuilder:validation:Optional -LogRetentionMinutes *string `json:"logRetentionMinutes,omitempty" tf:"log_retention_minutes,omitempty"` + // +kubebuilder:validation:Optional + LogRetentionMs *string `json:"logRetentionMs,omitempty" tf:"log_retention_ms,omitempty"` -// +kubebuilder:validation:Optional -LogRetentionMs *string `json:"logRetentionMs,omitempty" tf:"log_retention_ms,omitempty"` + // +kubebuilder:validation:Optional + LogSegmentBytes *string `json:"logSegmentBytes,omitempty" tf:"log_segment_bytes,omitempty"` -// +kubebuilder:validation:Optional -LogSegmentBytes *string `json:"logSegmentBytes,omitempty" tf:"log_segment_bytes,omitempty"` + // +kubebuilder:validation:Optional + MessageMaxBytes *string `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` -// +kubebuilder:validation:Optional -MessageMaxBytes *string `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` + // +kubebuilder:validation:Optional + NumPartitions *string `json:"numPartitions,omitempty" tf:"num_partitions,omitempty"` -// +kubebuilder:validation:Optional -NumPartitions *string `json:"numPartitions,omitempty" tf:"num_partitions,omitempty"` + // +kubebuilder:validation:Optional + 
OffsetsRetentionMinutes *string `json:"offsetsRetentionMinutes,omitempty" tf:"offsets_retention_minutes,omitempty"` -// +kubebuilder:validation:Optional -OffsetsRetentionMinutes *string `json:"offsetsRetentionMinutes,omitempty" tf:"offsets_retention_minutes,omitempty"` + // +kubebuilder:validation:Optional + ReplicaFetchMaxBytes *string `json:"replicaFetchMaxBytes,omitempty" tf:"replica_fetch_max_bytes,omitempty"` -// +kubebuilder:validation:Optional -ReplicaFetchMaxBytes *string `json:"replicaFetchMaxBytes,omitempty" tf:"replica_fetch_max_bytes,omitempty"` + // +kubebuilder:validation:Optional + // +listType=set + SSLCipherSuites []*string `json:"sslCipherSuites,omitempty" tf:"ssl_cipher_suites,omitempty"` -// +kubebuilder:validation:Optional -// +listType=set -SSLCipherSuites []*string `json:"sslCipherSuites,omitempty" tf:"ssl_cipher_suites,omitempty"` + // +kubebuilder:validation:Optional + // +listType=set + SaslEnabledMechanisms []*string `json:"saslEnabledMechanisms,omitempty" tf:"sasl_enabled_mechanisms,omitempty"` -// +kubebuilder:validation:Optional -// +listType=set -SaslEnabledMechanisms []*string `json:"saslEnabledMechanisms,omitempty" tf:"sasl_enabled_mechanisms,omitempty"` + // +kubebuilder:validation:Optional + SocketReceiveBufferBytes *string `json:"socketReceiveBufferBytes,omitempty" tf:"socket_receive_buffer_bytes,omitempty"` -// +kubebuilder:validation:Optional -SocketReceiveBufferBytes *string `json:"socketReceiveBufferBytes,omitempty" tf:"socket_receive_buffer_bytes,omitempty"` - -// +kubebuilder:validation:Optional -SocketSendBufferBytes *string `json:"socketSendBufferBytes,omitempty" tf:"socket_send_buffer_bytes,omitempty"` + // +kubebuilder:validation:Optional + SocketSendBufferBytes *string `json:"socketSendBufferBytes,omitempty" tf:"socket_send_buffer_bytes,omitempty"` } - type KafkaResourcesInitParameters struct { + // Volume of the storage available to a ZooKeeper host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a ZooKeeper host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of ZooKeeper hosts. For more information see the official documentation. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of ZooKeeper hosts. For more information see the official documentation. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type KafkaResourcesObservation struct { + // Volume of the storage available to a ZooKeeper host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` -// Volume of the storage available to a ZooKeeper host, in gigabytes. -DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` - -// Type of the storage of ZooKeeper hosts. For more information see the official documentation. -DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + // Type of the storage of ZooKeeper hosts. For more information see the official documentation. 
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` -ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` } - type KafkaResourcesParameters struct { + // Volume of the storage available to a ZooKeeper host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` -// Volume of the storage available to a ZooKeeper host, in gigabytes. -// +kubebuilder:validation:Optional -DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` - -// Type of the storage of ZooKeeper hosts. For more information see the official documentation. -// +kubebuilder:validation:Optional -DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` + // Type of the storage of ZooKeeper hosts. For more information see the official documentation. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"` -// +kubebuilder:validation:Optional -ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` } - type TopicConfigInitParameters struct { + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // Kafka topic settings. For more information, see the official documentation and the Kafka documentation. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` - -// Kafka topic settings. For more information, see the official documentation and the Kafka documentation. 
-CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` - -DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"` + DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"` -FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"` + FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"` -FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"` + FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"` -FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"` + FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"` -MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"` + MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"` -MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"` + MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"` -MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` + MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` -Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"` + Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"` -RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` + RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` -RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"` + RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"` -SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"` + SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"` } - type TopicConfigObservation struct { + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` + // Kafka topic settings. For more information, see the official documentation and the Kafka documentation. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` - -// Kafka topic settings. For more information, see the official documentation and the Kafka documentation. 
-CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` - -DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"` + DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"` -FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"` + FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"` -FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"` + FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"` -FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"` + FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"` -MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"` + MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"` -MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"` + MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"` -MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` + MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` -Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"` + Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"` -RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` + RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` -RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"` + RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"` -SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"` + SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"` } - type TopicConfigParameters struct { + // +kubebuilder:validation:Optional + CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` -// +kubebuilder:validation:Optional -CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"` - -// Kafka topic settings. For more information, see the official documentation and the Kafka documentation. -// +kubebuilder:validation:Optional -CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + // Kafka topic settings. For more information, see the official documentation and the Kafka documentation. 
+ // +kubebuilder:validation:Optional + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` -// +kubebuilder:validation:Optional -DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"` + // +kubebuilder:validation:Optional + DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"` -// +kubebuilder:validation:Optional -FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"` + // +kubebuilder:validation:Optional + FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"` -// +kubebuilder:validation:Optional -FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"` + // +kubebuilder:validation:Optional + FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"` -// +kubebuilder:validation:Optional -FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"` + // +kubebuilder:validation:Optional + FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"` -// +kubebuilder:validation:Optional -MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"` + // +kubebuilder:validation:Optional + MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"` -// +kubebuilder:validation:Optional -MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"` + // +kubebuilder:validation:Optional + MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"` -// +kubebuilder:validation:Optional -MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` + // +kubebuilder:validation:Optional + MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` -// +kubebuilder:validation:Optional -Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"` + // +kubebuilder:validation:Optional + Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"` -// +kubebuilder:validation:Optional -RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` + // +kubebuilder:validation:Optional + RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` -// +kubebuilder:validation:Optional -RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"` + // +kubebuilder:validation:Optional + RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"` -// +kubebuilder:validation:Optional -SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"` + // +kubebuilder:validation:Optional + SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"` } - type TopicInitParameters struct { + // The name of the topic. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the topic. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The number of the topic's partitions. + Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` -// The number of the topic's partitions. -Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` + // Amount of data copies (replicas) for the topic in the cluster. 
+ ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` -// Amount of data copies (replicas) for the topic in the cluster. -ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` - -// User-defined settings for the topic. The structure is documented below. -TopicConfig []TopicConfigInitParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"` + // User-defined settings for the topic. The structure is documented below. + TopicConfig []TopicConfigInitParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"` } - type TopicObservation struct { + // The name of the topic. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the topic. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// The number of the topic's partitions. -Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` + // The number of the topic's partitions. + Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` -// Amount of data copies (replicas) for the topic in the cluster. -ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` + // Amount of data copies (replicas) for the topic in the cluster. + ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` -// User-defined settings for the topic. The structure is documented below. -TopicConfig []TopicConfigObservation `json:"topicConfig,omitempty" tf:"topic_config,omitempty"` + // User-defined settings for the topic. The structure is documented below. + TopicConfig []TopicConfigObservation `json:"topicConfig,omitempty" tf:"topic_config,omitempty"` } - type TopicParameters struct { + // The name of the topic. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// The name of the topic. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` - -// The number of the topic's partitions. -// +kubebuilder:validation:Optional -Partitions *float64 `json:"partitions" tf:"partitions,omitempty"` + // The number of the topic's partitions. + // +kubebuilder:validation:Optional + Partitions *float64 `json:"partitions" tf:"partitions,omitempty"` -// Amount of data copies (replicas) for the topic in the cluster. -// +kubebuilder:validation:Optional -ReplicationFactor *float64 `json:"replicationFactor" tf:"replication_factor,omitempty"` + // Amount of data copies (replicas) for the topic in the cluster. + // +kubebuilder:validation:Optional + ReplicationFactor *float64 `json:"replicationFactor" tf:"replication_factor,omitempty"` -// User-defined settings for the topic. The structure is documented below. -// +kubebuilder:validation:Optional -TopicConfig []TopicConfigParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"` + // User-defined settings for the topic. The structure is documented below. + // +kubebuilder:validation:Optional + TopicConfig []TopicConfigParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"` } - type UserPermissionInitParameters struct { + // Set of hosts to which this permission grants access. + // +listType=set + AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` -// Set of hosts, to which this permission grants access to. -// +listType=set -AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` + // The role type to grant to the topic. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role type to grant to the topic. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -// The name of the topic that the permission grants access to. -TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` + // The name of the topic that the permission grants access to. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` } - type UserPermissionObservation struct { + // Set of hosts to which this permission grants access. + // +listType=set + AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` -// Set of hosts, to which this permission grants access to. -// +listType=set -AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` - -// The role type to grant to the topic. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role type to grant to the topic. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The name of the topic that the permission grants access to. -TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` + // The name of the topic that the permission grants access to. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` } - type UserPermissionParameters struct { + // Set of hosts to which this permission grants access. + // +kubebuilder:validation:Optional + // +listType=set + AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` -// Set of hosts, to which this permission grants access to. -// +kubebuilder:validation:Optional -// +listType=set -AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` - -// The role type to grant to the topic. -// +kubebuilder:validation:Optional -Role *string `json:"role" tf:"role,omitempty"` + // The role type to grant to the topic. + // +kubebuilder:validation:Optional + Role *string `json:"role" tf:"role,omitempty"` -// The name of the topic that the permission grants access to. -// +kubebuilder:validation:Optional -TopicName *string `json:"topicName" tf:"topic_name,omitempty"` + // The name of the topic that the permission grants access to. + // +kubebuilder:validation:Optional + TopicName *string `json:"topicName" tf:"topic_name,omitempty"` } // KafkaClusterSpec defines the desired state of KafkaCluster type KafkaClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider KafkaClusterParameters `json:"forProvider"` + ForProvider KafkaClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -1100,20 +1001,19 @@ type KafkaClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider KafkaClusterInitParameters `json:"initProvider,omitempty"` + InitProvider KafkaClusterInitParameters `json:"initProvider,omitempty"` } // KafkaClusterStatus defines the observed state of KafkaCluster. type KafkaClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider KafkaClusterObservation `json:"atProvider,omitempty"` + AtProvider KafkaClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // KafkaCluster is the Schema for the KafkaClusters API.
Manages a Kafka cluster within Yandex.Cloud. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -1123,10 +1023,10 @@ type KafkaClusterStatus struct { type KafkaCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec KafkaClusterSpec `json:"spec"` - Status KafkaClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec KafkaClusterSpec `json:"spec"` + Status KafkaClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_kafkaconnector_terraformed.go b/apis/mdb/v1alpha1/zz_kafkaconnector_terraformed.go new file mode 100755 index 0000000..92e65b8 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_kafkaconnector_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this KafkaConnector +func (mg *KafkaConnector) GetTerraformResourceType() string { + return "yandex_mdb_kafka_connector" +} + +// GetConnectionDetailsMapping for this KafkaConnector +func (tr *KafkaConnector) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"connector_config_mirrormaker[*].source_cluster[*].external_cluster[*].sasl_password": "connectorConfigMirrormaker[*].sourceCluster[*].externalCluster[*].saslPasswordSecretRef", "connector_config_mirrormaker[*].target_cluster[*].external_cluster[*].sasl_password": "connectorConfigMirrormaker[*].targetCluster[*].externalCluster[*].saslPasswordSecretRef", "connector_config_s3_sink[*].s3_connection[*].external_s3[*].secret_access_key": "connectorConfigS3Sink[*].s3Connection[*].externalS3[*].secretAccessKeySecretRef"} +} + +// GetObservation of this KafkaConnector +func (tr *KafkaConnector) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this KafkaConnector +func (tr *KafkaConnector) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this KafkaConnector +func (tr *KafkaConnector) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this KafkaConnector +func (tr *KafkaConnector) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this KafkaConnector +func (tr *KafkaConnector) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this KafkaConnector +func (tr *KafkaConnector) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this KafkaConnector +func (tr *KafkaConnector) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false; we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
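+ // For example (hypothetical values, illustration only): with Overwrite + // reset to false, a field already set in forProvider, such as name="foo", + // is kept even if initProvider sets name="bar", while a field set only in + // initProvider, such as tasksMax=2, is merged into the result.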
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this KafkaConnector using its observed tfState. +// Returns true if there are any spec changes for the resource. +func (tr *KafkaConnector) LateInitialize(attrs []byte) (bool, error) { + params := &KafkaConnectorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *KafkaConnector) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_kafkaconnector_types.go b/apis/mdb/v1alpha1/zz_kafkaconnector_types.go index 3415148..62fea54 100755 --- a/apis/mdb/v1alpha1/zz_kafkaconnector_types.go +++ b/apis/mdb/v1alpha1/zz_kafkaconnector_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,551 +7,481 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ConnectorConfigMirrormakerInitParameters struct { + // Replication factor for topics created in target cluster + ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` -// Replication factor for topics created in target cluster -ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` - -// Settings for source cluster. The structure is documented below. -SourceCluster []SourceClusterInitParameters `json:"sourceCluster,omitempty" tf:"source_cluster,omitempty"` + // Settings for source cluster. The structure is documented below. + SourceCluster []SourceClusterInitParameters `json:"sourceCluster,omitempty" tf:"source_cluster,omitempty"` -// Settings for target cluster. The structure is documented below. -TargetCluster []TargetClusterInitParameters `json:"targetCluster,omitempty" tf:"target_cluster,omitempty"` + // Settings for target cluster. The structure is documented below. + TargetCluster []TargetClusterInitParameters `json:"targetCluster,omitempty" tf:"target_cluster,omitempty"` -// The pattern for topic names to be replicated. -Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` + // The pattern for topic names to be replicated. + Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` } - type ConnectorConfigMirrormakerObservation struct { + // Replication factor for topics created in target cluster + ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` -// Replication factor for topics created in target cluster -ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` + // Settings for source cluster. The structure is documented below. + SourceCluster []SourceClusterObservation `json:"sourceCluster,omitempty" tf:"source_cluster,omitempty"` -// Settings for source cluster. The structure is documented below.
-SourceCluster []SourceClusterObservation `json:"sourceCluster,omitempty" tf:"source_cluster,omitempty"` + // Settings for target cluster. The structure is documented below. + TargetCluster []TargetClusterObservation `json:"targetCluster,omitempty" tf:"target_cluster,omitempty"` -// Settings for target cluster. The structure is documented below. -TargetCluster []TargetClusterObservation `json:"targetCluster,omitempty" tf:"target_cluster,omitempty"` - -// The pattern for topic names to be replicated. -Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` + // The pattern for topic names to be replicated. + Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` } - type ConnectorConfigMirrormakerParameters struct { + // Replication factor for topics created in target cluster + // +kubebuilder:validation:Optional + ReplicationFactor *float64 `json:"replicationFactor" tf:"replication_factor,omitempty"` -// Replication factor for topics created in target cluster -// +kubebuilder:validation:Optional -ReplicationFactor *float64 `json:"replicationFactor" tf:"replication_factor,omitempty"` + // Settings for source cluster. The structure is documented below. + // +kubebuilder:validation:Optional + SourceCluster []SourceClusterParameters `json:"sourceCluster" tf:"source_cluster,omitempty"` -// Settings for source cluster. The structure is documented below. -// +kubebuilder:validation:Optional -SourceCluster []SourceClusterParameters `json:"sourceCluster" tf:"source_cluster,omitempty"` + // Settings for target cluster. The structure is documented below. + // +kubebuilder:validation:Optional + TargetCluster []TargetClusterParameters `json:"targetCluster" tf:"target_cluster,omitempty"` -// Settings for target cluster. The structure is documented below. -// +kubebuilder:validation:Optional -TargetCluster []TargetClusterParameters `json:"targetCluster" tf:"target_cluster,omitempty"` - -// The pattern for topic names to be replicated. -// +kubebuilder:validation:Optional -Topics *string `json:"topics" tf:"topics,omitempty"` + // The pattern for topic names to be replicated. + // +kubebuilder:validation:Optional + Topics *string `json:"topics" tf:"topics,omitempty"` } - type ConnectorConfigS3SinkInitParameters struct { + // Compression type for messages. Cannot be changed. + FileCompressionType *string `json:"fileCompressionType,omitempty" tf:"file_compression_type,omitempty"` -// Сompression type for messages. Cannot be changed. -FileCompressionType *string `json:"fileCompressionType,omitempty" tf:"file_compression_type,omitempty"` - -// Max records per file. -FileMaxRecords *float64 `json:"fileMaxRecords,omitempty" tf:"file_max_records,omitempty"` + // Max records per file. + FileMaxRecords *float64 `json:"fileMaxRecords,omitempty" tf:"file_max_records,omitempty"` -// Settings for connection to s3-compatible storage. The structure is documented below. -S3Connection []S3ConnectionInitParameters `json:"s3Connection,omitempty" tf:"s3_connection,omitempty"` + // Settings for connection to s3-compatible storage. The structure is documented below. + S3Connection []S3ConnectionInitParameters `json:"s3Connection,omitempty" tf:"s3_connection,omitempty"` -// The pattern for topic names to be copied to s3 bucket. -Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` + // The pattern for topic names to be copied to s3 bucket. + Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` } - type ConnectorConfigS3SinkObservation struct { + // Compression type for messages. Cannot be changed. + FileCompressionType *string `json:"fileCompressionType,omitempty" tf:"file_compression_type,omitempty"` -// Сompression type for messages. Cannot be changed. -FileCompressionType *string `json:"fileCompressionType,omitempty" tf:"file_compression_type,omitempty"` - -// Max records per file. -FileMaxRecords *float64 `json:"fileMaxRecords,omitempty" tf:"file_max_records,omitempty"` + // Max records per file. + FileMaxRecords *float64 `json:"fileMaxRecords,omitempty" tf:"file_max_records,omitempty"` -// Settings for connection to s3-compatible storage. The structure is documented below. -S3Connection []S3ConnectionObservation `json:"s3Connection,omitempty" tf:"s3_connection,omitempty"` + // Settings for connection to s3-compatible storage. The structure is documented below. + S3Connection []S3ConnectionObservation `json:"s3Connection,omitempty" tf:"s3_connection,omitempty"` -// The pattern for topic names to be copied to s3 bucket. -Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` + // The pattern for topic names to be copied to s3 bucket. + Topics *string `json:"topics,omitempty" tf:"topics,omitempty"` } - type ConnectorConfigS3SinkParameters struct { + // Compression type for messages. Cannot be changed. + // +kubebuilder:validation:Optional + FileCompressionType *string `json:"fileCompressionType" tf:"file_compression_type,omitempty"` -// Сompression type for messages. Cannot be changed. -// +kubebuilder:validation:Optional -FileCompressionType *string `json:"fileCompressionType" tf:"file_compression_type,omitempty"` - -// Max records per file. -// +kubebuilder:validation:Optional -FileMaxRecords *float64 `json:"fileMaxRecords,omitempty" tf:"file_max_records,omitempty"` + // Max records per file. + // +kubebuilder:validation:Optional + FileMaxRecords *float64 `json:"fileMaxRecords,omitempty" tf:"file_max_records,omitempty"` -// Settings for connection to s3-compatible storage. The structure is documented below. -// +kubebuilder:validation:Optional -S3Connection []S3ConnectionParameters `json:"s3Connection" tf:"s3_connection,omitempty"` + // Settings for connection to s3-compatible storage. The structure is documented below. + // +kubebuilder:validation:Optional + S3Connection []S3ConnectionParameters `json:"s3Connection" tf:"s3_connection,omitempty"` -// The pattern for topic names to be copied to s3 bucket. -// +kubebuilder:validation:Optional -Topics *string `json:"topics" tf:"topics,omitempty"` + // The pattern for topic names to be copied to s3 bucket.
+ // +kubebuilder:validation:Optional + Topics *string `json:"topics" tf:"topics,omitempty"` } - type ExternalClusterInitParameters struct { + // List of bootstrap servers to connect to cluster + BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"` -// List of bootstrap servers to connect to cluster -BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"` - -// Type of SASL authentification mechanism to use -SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"` + // Type of SASL authentication mechanism to use + SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"` -// Password to use in SASL authentification mechanism -SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"` + // Password to use in SASL authentication mechanism + SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"` -// Username to use in SASL authentification mechanism -SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` + // Username to use in SASL authentication mechanism + SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` -// Security protocol to use -SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` + // Security protocol to use + SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` } - type ExternalClusterObservation struct { + // List of bootstrap servers to connect to cluster + BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"` -// List of bootstrap servers to connect to cluster -BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"` - -// Type of SASL authentification mechanism to use -SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"` + // Type of SASL authentication mechanism to use + SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"` -// Username to use in SASL authentification mechanism -SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` + // Username to use in SASL authentication mechanism + SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` -// Security protocol to use -SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` + // Security protocol to use + SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` } - type ExternalClusterParameters struct { + // List of bootstrap servers to connect to cluster + // +kubebuilder:validation:Optional + BootstrapServers *string `json:"bootstrapServers" tf:"bootstrap_servers,omitempty"` -// List of bootstrap servers to connect to cluster -// +kubebuilder:validation:Optional -BootstrapServers *string `json:"bootstrapServers" tf:"bootstrap_servers,omitempty"` + // Type of SASL authentication mechanism to use + // +kubebuilder:validation:Optional + SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"` -// Type of SASL authentification mechanism to use -// +kubebuilder:validation:Optional -SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"` + // Password to use in SASL authentication mechanism + // +kubebuilder:validation:Optional + SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"` -// Password to use in SASL authentification mechanism -// +kubebuilder:validation:Optional -SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"` + // Username to use in SASL authentication mechanism + // +kubebuilder:validation:Optional + SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` -// Username to use in SASL authentification mechanism -// +kubebuilder:validation:Optional -SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` - -// Security protocol to use -// +kubebuilder:validation:Optional -SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` + // Security protocol to use + // +kubebuilder:validation:Optional + SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` } - type ExternalS3InitParameters struct { + // ID of aws-compatible static key. + AccessKeyID *string `json:"accessKeyId,omitempty" tf:"access_key_id,omitempty"` -// ID of aws-compatible static key. -AccessKeyID *string `json:"accessKeyId,omitempty" tf:"access_key_id,omitempty"` + // URL of s3-compatible storage. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` -// URL of s3-compatible storage. -Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + // region of s3-compatible storage. Available region list. + Region *string `json:"region,omitempty" tf:"region,omitempty"` -// region of s3-compatible storage. Available region list. -Region *string `json:"region,omitempty" tf:"region,omitempty"` - -// Secret key of aws-compatible static key. -SecretAccessKeySecretRef *v1.SecretKeySelector `json:"secretAccessKeySecretRef,omitempty" tf:"-"` + // Secret key of aws-compatible static key. + SecretAccessKeySecretRef *v1.SecretKeySelector `json:"secretAccessKeySecretRef,omitempty" tf:"-"` } - type ExternalS3Observation struct { + // ID of aws-compatible static key. + AccessKeyID *string `json:"accessKeyId,omitempty" tf:"access_key_id,omitempty"` -// ID of aws-compatible static key. -AccessKeyID *string `json:"accessKeyId,omitempty" tf:"access_key_id,omitempty"` + // URL of s3-compatible storage. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` -// URL of s3-compatible storage. -Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` - -// region of s3-compatible storage. Available region list. -Region *string `json:"region,omitempty" tf:"region,omitempty"` + // region of s3-compatible storage. Available region list. + Region *string `json:"region,omitempty" tf:"region,omitempty"` } - type ExternalS3Parameters struct { + // ID of aws-compatible static key. + // +kubebuilder:validation:Optional + AccessKeyID *string `json:"accessKeyId,omitempty" tf:"access_key_id,omitempty"` -// ID of aws-compatible static key. -// +kubebuilder:validation:Optional -AccessKeyID *string `json:"accessKeyId,omitempty" tf:"access_key_id,omitempty"` + // URL of s3-compatible storage. + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` -// URL of s3-compatible storage. -// +kubebuilder:validation:Optional -Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` + // region of s3-compatible storage. Available region list. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` -// region of s3-compatible storage. Available region list.
-// +kubebuilder:validation:Optional
-Region *string `json:"region,omitempty" tf:"region,omitempty"`
-
-// Secret key of aws-compatible static key.
-// +kubebuilder:validation:Optional
-SecretAccessKeySecretRef *v1.SecretKeySelector `json:"secretAccessKeySecretRef,omitempty" tf:"-"`
+	// Secret key of aws-compatible static key.
+	// +kubebuilder:validation:Optional
+	SecretAccessKeySecretRef *v1.SecretKeySelector `json:"secretAccessKeySecretRef,omitempty" tf:"-"`
 }
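A minimal Go sketch (not part of the generated diff) of how the ExternalS3Parameters block above is populated; it assumes it sits alongside the generated v1alpha1 types, and the endpoint, region, and secret names are hypothetical placeholders. Note the secret key itself never enters the spec: tf:"-" keeps it out of Terraform state, and only the Kubernetes Secret reference is stored.

func strPtr(s string) *string { return &s }

func exampleExternalS3() ExternalS3Parameters {
	return ExternalS3Parameters{
		AccessKeyID: strPtr("example-access-key-id"),
		Endpoint:    strPtr("storage.example.net"),
		Region:      strPtr("example-region"),
		// The secret key is read from a Kubernetes Secret via the selector.
		SecretAccessKeySecretRef: &v1.SecretKeySelector{
			SecretReference: v1.SecretReference{Name: "s3-creds", Namespace: "default"},
			Key:             "secretAccessKey",
		},
	}
}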
-
 type KafkaConnectorInitParameters struct {
+	// +crossplane:generate:reference:type=KafkaCluster
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// +crossplane:generate:reference:type=KafkaCluster
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// Reference to a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+	// Reference to a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
-// Selector for a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+	// Selector for a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
-// Params for MirrorMaker2 connector. The structure is documented below.
-ConnectorConfigMirrormaker []ConnectorConfigMirrormakerInitParameters `json:"connectorConfigMirrormaker,omitempty" tf:"connector_config_mirrormaker,omitempty"`
+	// Params for MirrorMaker2 connector. The structure is documented below.
+	ConnectorConfigMirrormaker []ConnectorConfigMirrormakerInitParameters `json:"connectorConfigMirrormaker,omitempty" tf:"connector_config_mirrormaker,omitempty"`
-// Params for S3 Sink connector. The structure is documented below.
-ConnectorConfigS3Sink []ConnectorConfigS3SinkInitParameters `json:"connectorConfigS3Sink,omitempty" tf:"connector_config_s3_sink,omitempty"`
+	// Params for S3 Sink connector. The structure is documented below.
+	ConnectorConfigS3Sink []ConnectorConfigS3SinkInitParameters `json:"connectorConfigS3Sink,omitempty" tf:"connector_config_s3_sink,omitempty"`
-// The name of the connector.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the connector.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Additional properties for connector.
-// +mapType=granular
-Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"`
+	// Additional properties for connector.
+	// +mapType=granular
+	Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"`
-// The number of the connector's parallel working tasks. Default is the number of brokers
-TasksMax *float64 `json:"tasksMax,omitempty" tf:"tasks_max,omitempty"`
+	// The number of the connector's parallel working tasks. Default is the number of brokers.
+	TasksMax *float64 `json:"tasksMax,omitempty" tf:"tasks_max,omitempty"`
 }
-
 type KafkaConnectorObservation struct {
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+
+	// Params for MirrorMaker2 connector. The structure is documented below.
+	ConnectorConfigMirrormaker []ConnectorConfigMirrormakerObservation `json:"connectorConfigMirrormaker,omitempty" tf:"connector_config_mirrormaker,omitempty"`
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// Params for MirrorMaker2 connector. The structure is documented below.
-ConnectorConfigMirrormaker []ConnectorConfigMirrormakerObservation `json:"connectorConfigMirrormaker,omitempty" tf:"connector_config_mirrormaker,omitempty"`
+	// Params for S3 Sink connector. The structure is documented below.
+	ConnectorConfigS3Sink []ConnectorConfigS3SinkObservation `json:"connectorConfigS3Sink,omitempty" tf:"connector_config_s3_sink,omitempty"`
-// Params for S3 Sink connector. The structure is documented below.
-ConnectorConfigS3Sink []ConnectorConfigS3SinkObservation `json:"connectorConfigS3Sink,omitempty" tf:"connector_config_s3_sink,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// The name of the connector.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the connector.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Additional properties for connector.
+	// +mapType=granular
+	Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"`
-// Additional properties for connector.
-// +mapType=granular
-Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"`
-
-// The number of the connector's parallel working tasks. Default is the number of brokers
-TasksMax *float64 `json:"tasksMax,omitempty" tf:"tasks_max,omitempty"`
+	// The number of the connector's parallel working tasks. Default is the number of brokers.
+	TasksMax *float64 `json:"tasksMax,omitempty" tf:"tasks_max,omitempty"`
 }
-
 type KafkaConnectorParameters struct {
+	// +crossplane:generate:reference:type=KafkaCluster
+	// +kubebuilder:validation:Optional
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// +crossplane:generate:reference:type=KafkaCluster
-// +kubebuilder:validation:Optional
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+	// Reference to a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
-// Reference to a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+	// Selector for a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
-// Selector for a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+	// Params for MirrorMaker2 connector. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ConnectorConfigMirrormaker []ConnectorConfigMirrormakerParameters `json:"connectorConfigMirrormaker,omitempty" tf:"connector_config_mirrormaker,omitempty"`
-// Params for MirrorMaker2 connector. The structure is documented below.
-// +kubebuilder:validation:Optional
-ConnectorConfigMirrormaker []ConnectorConfigMirrormakerParameters `json:"connectorConfigMirrormaker,omitempty" tf:"connector_config_mirrormaker,omitempty"`
+	// Params for S3 Sink connector. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ConnectorConfigS3Sink []ConnectorConfigS3SinkParameters `json:"connectorConfigS3Sink,omitempty" tf:"connector_config_s3_sink,omitempty"`
-// Params for S3 Sink connector. The structure is documented below.
-// +kubebuilder:validation:Optional
-ConnectorConfigS3Sink []ConnectorConfigS3SinkParameters `json:"connectorConfigS3Sink,omitempty" tf:"connector_config_s3_sink,omitempty"`
+	// The name of the connector.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the connector.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Additional properties for connector.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"`
-// Additional properties for connector.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"`
-
-// The number of the connector's parallel working tasks. Default is the number of brokers
-// +kubebuilder:validation:Optional
-TasksMax *float64 `json:"tasksMax,omitempty" tf:"tasks_max,omitempty"`
+	// The number of the connector's parallel working tasks. Default is the number of brokers.
+	// +kubebuilder:validation:Optional
+	TasksMax *float64 `json:"tasksMax,omitempty" tf:"tasks_max,omitempty"`
 }
-
 type S3ConnectionInitParameters struct {
+	// Name of the bucket in s3-compatible storage.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
-// Name of the bucket in s3-compatible storage.
-BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
-
-// Connection params for external s3-compatible storage. The structure is documented below.
-ExternalS3 []ExternalS3InitParameters `json:"externalS3,omitempty" tf:"external_s3,omitempty"`
+	// Connection params for external s3-compatible storage. The structure is documented below.
+	ExternalS3 []ExternalS3InitParameters `json:"externalS3,omitempty" tf:"external_s3,omitempty"`
 }
-
 type S3ConnectionObservation struct {
+	// Name of the bucket in s3-compatible storage.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
-// Name of the bucket in s3-compatible storage.
-BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
-
-// Connection params for external s3-compatible storage. The structure is documented below.
-ExternalS3 []ExternalS3Observation `json:"externalS3,omitempty" tf:"external_s3,omitempty"`
+	// Connection params for external s3-compatible storage. The structure is documented below.
+	ExternalS3 []ExternalS3Observation `json:"externalS3,omitempty" tf:"external_s3,omitempty"`
 }
-
 type S3ConnectionParameters struct {
+	// Name of the bucket in s3-compatible storage.
+	// +kubebuilder:validation:Optional
+	BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"`
-// Name of the bucket in s3-compatible storage.
-// +kubebuilder:validation:Optional
-BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"`
-
-// Connection params for external s3-compatible storage. The structure is documented below.
-// +kubebuilder:validation:Optional
-ExternalS3 []ExternalS3Parameters `json:"externalS3" tf:"external_s3,omitempty"`
+	// Connection params for external s3-compatible storage. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	ExternalS3 []ExternalS3Parameters `json:"externalS3" tf:"external_s3,omitempty"`
 }
-
 type SourceClusterInitParameters struct {
+	// Name of the cluster. Used also as a topic prefix
+	Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-// Name of the cluster. Used also as a topic prefix
-Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-
-// Connection params for external cluster
-ExternalCluster []ExternalClusterInitParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
+	// Connection params for external cluster
+	ExternalCluster []ExternalClusterInitParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-// Using this section in the cluster definition (source or target) means it's this cluster
-ThisCluster []ThisClusterInitParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
+	// Using this section in the cluster definition (source or target) means it's this cluster
+	ThisCluster []ThisClusterInitParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
 }
-
 type SourceClusterObservation struct {
+	// Name of the cluster. Used also as a topic prefix
+	Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-// Name of the cluster. Used also as a topic prefix
-Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-
-// Connection params for external cluster
-ExternalCluster []ExternalClusterObservation `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
+	// Connection params for external cluster
+	ExternalCluster []ExternalClusterObservation `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-// Using this section in the cluster definition (source or target) means it's this cluster
-ThisCluster []ThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
+	// Using this section in the cluster definition (source or target) means it's this cluster
+	ThisCluster []ThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
 }
-
 type SourceClusterParameters struct {
+	// Name of the cluster. Used also as a topic prefix
+	// +kubebuilder:validation:Optional
+	Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-// Name of the cluster. Used also as a topic prefix
-// +kubebuilder:validation:Optional
-Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-
-// Connection params for external cluster
-// +kubebuilder:validation:Optional
-ExternalCluster []ExternalClusterParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
+	// Connection params for external cluster
+	// +kubebuilder:validation:Optional
+	ExternalCluster []ExternalClusterParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-// Using this section in the cluster definition (source or target) means it's this cluster
-// +kubebuilder:validation:Optional
-ThisCluster []ThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
+	// Using this section in the cluster definition (source or target) means it's this cluster
+	// +kubebuilder:validation:Optional
+	ThisCluster []ThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
 }
-
 type TargetClusterExternalClusterInitParameters struct {
+	// List of bootstrap servers to connect to cluster
+	BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"`
-// List of bootstrap servers to connect to cluster
-BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"`
+	// Type of SASL authentication mechanism to use
+	SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// Type of SASL authentification mechanism to use
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+	// Password to use in SASL authentication mechanism
+	SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
-// Password to use in SASL authentification mechanism
-SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
+	// Username to use in SASL authentication mechanism
+	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Username to use in SASL authentification mechanism
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-
-// Security protocol to use
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
+	// Security protocol to use
+	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
 }
-
 type TargetClusterExternalClusterObservation struct {
+	// List of bootstrap servers to connect to cluster
+	BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"`
-// List of bootstrap servers to connect to cluster
-BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"`
+	// Type of SASL authentication mechanism to use
+	SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// Type of SASL authentification mechanism to use
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+	// Username to use in SASL authentication mechanism
+	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Username to use in SASL authentification mechanism
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-
-// Security protocol to use
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
+	// Security protocol to use
+	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
 }
-
 type TargetClusterExternalClusterParameters struct {
+	// List of bootstrap servers to connect to cluster
+	// +kubebuilder:validation:Optional
+	BootstrapServers *string `json:"bootstrapServers" tf:"bootstrap_servers,omitempty"`
-// List of bootstrap servers to connect to cluster
-// +kubebuilder:validation:Optional
-BootstrapServers *string `json:"bootstrapServers" tf:"bootstrap_servers,omitempty"`
+	// Type of SASL authentication mechanism to use
+	// +kubebuilder:validation:Optional
+	SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
-// Type of SASL authentification mechanism to use
-// +kubebuilder:validation:Optional
-SaslMechanism *string `json:"saslMechanism,omitempty" tf:"sasl_mechanism,omitempty"`
+	// Password to use in SASL authentication mechanism
+	// +kubebuilder:validation:Optional
+	SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
-// Password to use in SASL authentification mechanism
-// +kubebuilder:validation:Optional
-SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`
+	// Username to use in SASL authentication mechanism
+	// +kubebuilder:validation:Optional
+	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-// Username to use in SASL authentification mechanism
-// +kubebuilder:validation:Optional
-SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`
-
-// Security protocol to use
-// +kubebuilder:validation:Optional
-SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
+	// Security protocol to use
+	// +kubebuilder:validation:Optional
+	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`
 }
-
 type TargetClusterInitParameters struct {
+	// Name of the cluster. Used also as a topic prefix
+	Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-// Name of the cluster. Used also as a topic prefix
-Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
+	// Connection params for external cluster
+	ExternalCluster []TargetClusterExternalClusterInitParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-// Connection params for external cluster
-ExternalCluster []TargetClusterExternalClusterInitParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-
-// Using this section in the cluster definition (source or target) means it's this cluster
-ThisCluster []TargetClusterThisClusterInitParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
+	// Using this section in the cluster definition (source or target) means it's this cluster
+	ThisCluster []TargetClusterThisClusterInitParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
 }
-
 type TargetClusterObservation struct {
+	// Name of the cluster. Used also as a topic prefix
+	Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-// Name of the cluster. Used also as a topic prefix
-Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-
-// Connection params for external cluster
-ExternalCluster []TargetClusterExternalClusterObservation `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
+	// Connection params for external cluster
+	ExternalCluster []TargetClusterExternalClusterObservation `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-// Using this section in the cluster definition (source or target) means it's this cluster
-ThisCluster []TargetClusterThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
+	// Using this section in the cluster definition (source or target) means it's this cluster
+	ThisCluster []TargetClusterThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
 }
-
 type TargetClusterParameters struct {
+	// Name of the cluster. Used also as a topic prefix
+	// +kubebuilder:validation:Optional
+	Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-// Name of the cluster. Used also as a topic prefix
-// +kubebuilder:validation:Optional
-Alias *string `json:"alias,omitempty" tf:"alias,omitempty"`
-
-// Connection params for external cluster
-// +kubebuilder:validation:Optional
-ExternalCluster []TargetClusterExternalClusterParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
+	// Connection params for external cluster
+	// +kubebuilder:validation:Optional
+	ExternalCluster []TargetClusterExternalClusterParameters `json:"externalCluster,omitempty" tf:"external_cluster,omitempty"`
-// Using this section in the cluster definition (source or target) means it's this cluster
-// +kubebuilder:validation:Optional
-ThisCluster []TargetClusterThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
+	// Using this section in the cluster definition (source or target) means it's this cluster
+	// +kubebuilder:validation:Optional
+	ThisCluster []TargetClusterThisClusterParameters `json:"thisCluster,omitempty" tf:"this_cluster,omitempty"`
 }
-
 type TargetClusterThisClusterInitParameters struct {
-
 }
-
 type TargetClusterThisClusterObservation struct {
-
 }
-
 type TargetClusterThisClusterParameters struct {
-
 }
-
 type ThisClusterInitParameters struct {
-
 }
-
 type ThisClusterObservation struct {
-
 }
-
 type ThisClusterParameters struct {
-
 }
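An illustrative sketch of how the source/target pair above is wired for MirrorMaker replication: exactly one side is marked as the managed cluster itself via an empty this_cluster entry, and the other side points at an external cluster reached over SASL. All values are hypothetical, strPtr is the helper from the earlier sketch, and this assumes the same v1alpha1 package as the generated types.

func exampleMirrormakerClusters() (SourceClusterParameters, TargetClusterParameters) {
	source := SourceClusterParameters{
		Alias: strPtr("source"), // also used as the topic prefix on replicated topics
		ExternalCluster: []ExternalClusterParameters{{
			BootstrapServers: strPtr("broker1.example:9091,broker2.example:9091"),
			SaslMechanism:    strPtr("SCRAM-SHA-512"),
			SaslUsername:     strPtr("mirror"),
			SecurityProtocol: strPtr("SASL_SSL"),
		}},
	}
	target := TargetClusterParameters{
		Alias: strPtr("target"),
		// The empty struct carries no fields; its presence alone marks
		// this managed cluster as the replication target.
		ThisCluster: []TargetClusterThisClusterParameters{{}},
	}
	return source, target
}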
 // KafkaConnectorSpec defines the desired state of KafkaConnector
 type KafkaConnectorSpec struct {
 	v1.ResourceSpec `json:",inline"`
-    ForProvider KafkaConnectorParameters `json:"forProvider"`
+	ForProvider KafkaConnectorParameters `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -564,20 +492,19 @@ type KafkaConnectorSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-    InitProvider KafkaConnectorInitParameters `json:"initProvider,omitempty"`
+	InitProvider KafkaConnectorInitParameters `json:"initProvider,omitempty"`
 }
 
 // KafkaConnectorStatus defines the observed state of KafkaConnector.
 type KafkaConnectorStatus struct {
 	v1.ResourceStatus `json:",inline"`
-    AtProvider KafkaConnectorObservation `json:"atProvider,omitempty"`
+	AtProvider KafkaConnectorObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // KafkaConnector is the Schema for the KafkaConnectors API. Manages a connector of a Kafka cluster within Yandex.Cloud.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
@@ -587,9 +514,9 @@ type KafkaConnectorStatus struct {
 type KafkaConnector struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
-    Spec KafkaConnectorSpec `json:"spec"`
-    Status KafkaConnectorStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
	Spec   KafkaConnectorSpec   `json:"spec"`
+	Status KafkaConnectorStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/apis/mdb/v1alpha1/zz_kafkatopic_terraformed.go b/apis/mdb/v1alpha1/zz_kafkatopic_terraformed.go
new file mode 100755
index 0000000..8ca506f
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_kafkatopic_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this KafkaTopic
+func (mg *KafkaTopic) GetTerraformResourceType() string {
+	return "yandex_mdb_kafka_topic"
+}
+
+// GetConnectionDetailsMapping for this KafkaTopic
+func (tr *KafkaTopic) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this KafkaTopic
+func (tr *KafkaTopic) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this KafkaTopic
+func (tr *KafkaTopic) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this KafkaTopic
+func (tr *KafkaTopic) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this KafkaTopic
+func (tr *KafkaTopic) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this KafkaTopic
+func (tr *KafkaTopic) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this KafkaTopic
+func (tr *KafkaTopic) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this KafkaTopic
+func (tr *KafkaTopic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
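A standalone sketch of the merge semantics used by GetMergedParameters above: initProvider values only fill gaps and never overwrite forProvider values. mergo.WithSliceDeepCopy also flips Overwrite on, which is why the extra option function switches it back off.

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	forProvider := map[string]any{"partitions": 3}
	initProvider := map[string]any{"partitions": 6, "replicationFactor": 1}

	// Merge initProvider into forProvider without overwriting existing keys.
	err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(forProvider) // map[partitions:3 replicationFactor:1]
}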
+
+// LateInitialize this KafkaTopic using its observed tfState.
+// Returns true if there are any spec changes for the resource.
+func (tr *KafkaTopic) LateInitialize(attrs []byte) (bool, error) {
+	params := &KafkaTopicParameters_2{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *KafkaTopic) GetTerraformSchemaVersion() int {
+	return 0
+}
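Conceptually, the late-initialization performed above by upjet's resource.GenericLateInitializer back-fills unset (nil) spec fields from the observed Terraform state and reports whether anything changed. A hypothetical single-field sketch of that rule:

// lateInitString back-fills one optional field: if the spec left it unset
// and the observed state has a value, copy it in and report a change.
func lateInitString(spec **string, observed *string) bool {
	if *spec == nil && observed != nil {
		v := *observed
		*spec = &v
		return true
	}
	return false
}

The generated LateInitialize applies the same idea across the whole KafkaTopicParameters_2 struct via reflection, skipping fields whose observed value is the JSON zero value.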
diff --git a/apis/mdb/v1alpha1/zz_kafkatopic_types.go b/apis/mdb/v1alpha1/zz_kafkatopic_types.go
index 644f9b6..312b9af 100755
--- a/apis/mdb/v1alpha1/zz_kafkatopic_types.go
+++ b/apis/mdb/v1alpha1/zz_kafkatopic_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,206 +7,190 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
-
 type KafkaTopicInitParameters_2 struct {
+	// +crossplane:generate:reference:type=KafkaCluster
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// +crossplane:generate:reference:type=KafkaCluster
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// Reference to a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+	// Reference to a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
-// Selector for a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+	// Selector for a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
-// The name of the topic.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the topic.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The number of the topic's partitions.
-Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"`
+	// The number of the topic's partitions.
+	Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"`
-// Amount of data copies (replicas) for the topic in the cluster.
-ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"`
+	// Amount of data copies (replicas) for the topic in the cluster.
+	ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"`
-// User-defined settings for the topic. The structure is documented below.
-TopicConfig []KafkaTopicTopicConfigInitParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"`
+	// User-defined settings for the topic. The structure is documented below.
+	TopicConfig []KafkaTopicTopicConfigInitParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"`
 }
-
 type KafkaTopicObservation_2 struct {
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
-
-// The name of the topic.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the topic.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The number of the topic's partitions.
-Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"`
+	// The number of the topic's partitions.
+	Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"`
-// Amount of data copies (replicas) for the topic in the cluster.
-ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"`
+	// Amount of data copies (replicas) for the topic in the cluster.
+	ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"`
-// User-defined settings for the topic. The structure is documented below.
-TopicConfig []KafkaTopicTopicConfigObservation `json:"topicConfig,omitempty" tf:"topic_config,omitempty"`
+	// User-defined settings for the topic. The structure is documented below.
+	TopicConfig []KafkaTopicTopicConfigObservation `json:"topicConfig,omitempty" tf:"topic_config,omitempty"`
 }
-
 type KafkaTopicParameters_2 struct {
+	// +crossplane:generate:reference:type=KafkaCluster
+	// +kubebuilder:validation:Optional
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// +crossplane:generate:reference:type=KafkaCluster
-// +kubebuilder:validation:Optional
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// Reference to a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+	// Reference to a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
-// Selector for a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+	// Selector for a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
-// The name of the topic.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the topic.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The number of the topic's partitions.
-// +kubebuilder:validation:Optional
-Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"`
+	// The number of the topic's partitions.
+	// +kubebuilder:validation:Optional
+	Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"`
-// Amount of data copies (replicas) for the topic in the cluster.
-// +kubebuilder:validation:Optional
-ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"`
+	// Amount of data copies (replicas) for the topic in the cluster.
+	// +kubebuilder:validation:Optional
+	ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"`
-// User-defined settings for the topic. The structure is documented below.
-// +kubebuilder:validation:Optional
-TopicConfig []KafkaTopicTopicConfigParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"`
+	// User-defined settings for the topic. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	TopicConfig []KafkaTopicTopicConfigParameters `json:"topicConfig,omitempty" tf:"topic_config,omitempty"`
 }
-
 type KafkaTopicTopicConfigInitParameters struct {
+	// Kafka topic settings. For more information, see the official documentation and the Kafka documentation.
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// Kafka topic settings. For more information, see the official documentation and the Kafka documentation.
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
-CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
+	DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"`
-DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"`
+	FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"`
-FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"`
+	FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"`
-FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"`
+	FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"`
-FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"`
+	MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"`
-MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"`
+	MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"`
-MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"`
+	MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"`
-MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"`
+	Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"`
-Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"`
+	RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"`
-RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"`
+	RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"`
-RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"`
-
-SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"`
+	SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"`
 }
-
 type KafkaTopicTopicConfigObservation struct {
+	// Kafka topic settings. For more information, see the official documentation and the Kafka documentation.
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// Kafka topic settings. For more information, see the official documentation and the Kafka documentation.
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
-CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
+	DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"`
-DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"`
+	FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"`
-FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"`
+	FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"`
-FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"`
+	FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"`
-FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"`
+	MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"`
-MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"`
+	MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"`
-MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"`
+	MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"`
-MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"`
+	Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"`
-Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"`
+	RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"`
-RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"`
+	RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"`
-RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"`
-
-SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"`
+	SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"`
 }
-
 type KafkaTopicTopicConfigParameters struct {
+	// Kafka topic settings. For more information, see the official documentation and the Kafka documentation.
+	// +kubebuilder:validation:Optional
+	CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
-// Kafka topic settings. For more information, see the official documentation and the Kafka documentation.
-// +kubebuilder:validation:Optional
-CleanupPolicy *string `json:"cleanupPolicy,omitempty" tf:"cleanup_policy,omitempty"`
+	// +kubebuilder:validation:Optional
+	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
-// +kubebuilder:validation:Optional
-CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
+	// +kubebuilder:validation:Optional
+	DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"`
-// +kubebuilder:validation:Optional
-DeleteRetentionMs *string `json:"deleteRetentionMs,omitempty" tf:"delete_retention_ms,omitempty"`
+	// +kubebuilder:validation:Optional
+	FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"`
-// +kubebuilder:validation:Optional
-FileDeleteDelayMs *string `json:"fileDeleteDelayMs,omitempty" tf:"file_delete_delay_ms,omitempty"`
+	// +kubebuilder:validation:Optional
+	FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"`
-// +kubebuilder:validation:Optional
-FlushMessages *string `json:"flushMessages,omitempty" tf:"flush_messages,omitempty"`
+	// +kubebuilder:validation:Optional
+	FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"`
-// +kubebuilder:validation:Optional
-FlushMs *string `json:"flushMs,omitempty" tf:"flush_ms,omitempty"`
+	// +kubebuilder:validation:Optional
+	MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"`
-// +kubebuilder:validation:Optional
-MaxMessageBytes *string `json:"maxMessageBytes,omitempty" tf:"max_message_bytes,omitempty"`
+	// +kubebuilder:validation:Optional
+	MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"`
-// +kubebuilder:validation:Optional
-MinCompactionLagMs *string `json:"minCompactionLagMs,omitempty" tf:"min_compaction_lag_ms,omitempty"`
+	// +kubebuilder:validation:Optional
+	MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"`
-// +kubebuilder:validation:Optional
-MinInsyncReplicas *string `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"`
+	// +kubebuilder:validation:Optional
+	Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"`
-// +kubebuilder:validation:Optional
-Preallocate *bool `json:"preallocate,omitempty" tf:"preallocate,omitempty"`
+	// +kubebuilder:validation:Optional
+	RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"`
-// +kubebuilder:validation:Optional
-RetentionBytes *string `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"`
+	// +kubebuilder:validation:Optional
+	RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"`
-// +kubebuilder:validation:Optional
-RetentionMs *string `json:"retentionMs,omitempty" tf:"retention_ms,omitempty"`
-
-// +kubebuilder:validation:Optional
-SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"`
+	// +kubebuilder:validation:Optional
+	SegmentBytes *string `json:"segmentBytes,omitempty" tf:"segment_bytes,omitempty"`
 }
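A hypothetical sketch of filling in the topic settings above, reusing strPtr from the earlier example and assuming the same package. Note that every numeric setting is a *string, mirroring the Terraform schema; the cleanup-policy enum value is an assumption based on the Yandex provider documentation, not verified here.

func exampleTopicConfig() KafkaTopicTopicConfigParameters {
	return KafkaTopicTopicConfigParameters{
		CleanupPolicy: strPtr("CLEANUP_POLICY_DELETE"), // assumed provider enum value
		RetentionMs:   strPtr("604800000"),             // 7 days, passed as a string
		SegmentBytes:  strPtr("1073741824"),            // 1 GiB, also a string
	}
}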
 // KafkaTopicSpec defines the desired state of KafkaTopic
 type KafkaTopicSpec struct {
 	v1.ResourceSpec `json:",inline"`
-    ForProvider KafkaTopicParameters_2 `json:"forProvider"`
+	ForProvider KafkaTopicParameters_2 `json:"forProvider"`
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -219,20 +201,19 @@ type KafkaTopicSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-    InitProvider KafkaTopicInitParameters_2 `json:"initProvider,omitempty"`
+	InitProvider KafkaTopicInitParameters_2 `json:"initProvider,omitempty"`
 }
 
 // KafkaTopicStatus defines the observed state of KafkaTopic.
 type KafkaTopicStatus struct {
 	v1.ResourceStatus `json:",inline"`
-    AtProvider KafkaTopicObservation_2 `json:"atProvider,omitempty"`
+	AtProvider KafkaTopicObservation_2 `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // KafkaTopic is the Schema for the KafkaTopics API. Manages a topic of a Kafka cluster within Yandex.Cloud.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
@@ -242,11 +223,11 @@ type KafkaTopicStatus struct {
 type KafkaTopic struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.partitions) || (has(self.initProvider) && has(self.initProvider.partitions))",message="spec.forProvider.partitions is a required parameter"
-// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.replicationFactor) || (has(self.initProvider) && has(self.initProvider.replicationFactor))",message="spec.forProvider.replicationFactor is a required parameter"
-    Spec KafkaTopicSpec `json:"spec"`
-    Status KafkaTopicStatus `json:"status,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.partitions) || (has(self.initProvider) && has(self.initProvider.partitions))",message="spec.forProvider.partitions is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.replicationFactor) || (has(self.initProvider) && has(self.initProvider.replicationFactor))",message="spec.forProvider.replicationFactor is a required parameter"
+	Spec   KafkaTopicSpec   `json:"spec"`
+	Status KafkaTopicStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/apis/mdb/v1alpha1/zz_kafkauser_terraformed.go b/apis/mdb/v1alpha1/zz_kafkauser_terraformed.go
new file mode 100755
index 0000000..3d2c56f
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_kafkauser_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this KafkaUser
+func (mg *KafkaUser) GetTerraformResourceType() string {
+	return "yandex_mdb_kafka_user"
+}
+
+// GetConnectionDetailsMapping for this KafkaUser
+func (tr *KafkaUser) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"password": "passwordSecretRef"}
+}
+
+// GetObservation of this KafkaUser
+func (tr *KafkaUser) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this KafkaUser
+func (tr *KafkaUser) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this KafkaUser
+func (tr *KafkaUser) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this KafkaUser
+func (tr *KafkaUser) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this KafkaUser
+func (tr *KafkaUser) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this KafkaUser
+func (tr *KafkaUser) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this KafkaUser
+func (tr *KafkaUser) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
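A rough sketch of what the connection-details mapping above implies: the Terraform "password" attribute is sensitive, so instead of being stored in the CR it is split out of the Terraform state and surfaced as a connection detail, while the spec only carries the passwordSecretRef selector. The helper below is hypothetical, not part of upjet's API.

// splitSensitive removes mapped sensitive attributes from the observed
// Terraform state and returns them as connection-detail bytes.
func splitSensitive(tfState map[string]any, mapping map[string]string) map[string][]byte {
	details := make(map[string][]byte)
	for tfAttr := range mapping {
		if v, ok := tfState[tfAttr].(string); ok {
			details[tfAttr] = []byte(v)
			delete(tfState, tfAttr) // keep the secret out of spec/status
		}
	}
	return details
}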
+
+// LateInitialize this KafkaUser using its observed tfState.
+// Returns true if there are any spec changes for the resource.
+func (tr *KafkaUser) LateInitialize(attrs []byte) (bool, error) {
+	params := &KafkaUserParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *KafkaUser) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/mdb/v1alpha1/zz_kafkauser_types.go b/apis/mdb/v1alpha1/zz_kafkauser_types.go
index bd9ce6d..31cb336 100755
--- a/apis/mdb/v1alpha1/zz_kafkauser_types.go
+++ b/apis/mdb/v1alpha1/zz_kafkauser_types.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -9,132 +7,116 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
-
 )
-
-
-
 type KafkaUserInitParameters struct {
+	// +crossplane:generate:reference:type=KafkaCluster
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// +crossplane:generate:reference:type=KafkaCluster
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// Reference to a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+	// Reference to a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
-// Selector for a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+	// Selector for a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The password of the user.
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+	// The password of the user.
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// Set of permissions granted to the user. The structure is documented below.
-Permission []KafkaUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []KafkaUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 type KafkaUserObservation struct {
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
-
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Set of permissions granted to the user. The structure is documented below.
-Permission []KafkaUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []KafkaUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 type KafkaUserParameters struct {
+	// +crossplane:generate:reference:type=KafkaCluster
+	// +kubebuilder:validation:Optional
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// +crossplane:generate:reference:type=KafkaCluster
-// +kubebuilder:validation:Optional
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-
-// Reference to a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
+	// Reference to a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"`
-// Selector for a KafkaCluster to populate clusterId.
-// +kubebuilder:validation:Optional
-ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
+	// Selector for a KafkaCluster to populate clusterId.
+	// +kubebuilder:validation:Optional
+	ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"`
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the user.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The password of the user.
-// +kubebuilder:validation:Optional
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+	// The password of the user.
+	// +kubebuilder:validation:Optional
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// Set of permissions granted to the user. The structure is documented below.
-// +kubebuilder:validation:Optional
-Permission []KafkaUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Permission []KafkaUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 type KafkaUserPermissionInitParameters struct {
+	// Set of hosts to which this permission grants access.
+	// +listType=set
+	AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"`
-// Set of hosts, to which this permission grants access to.
-// +listType=set
-AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"`
+	// The role type to grant to the topic.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
-// The role type to grant to the topic.
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
-
-// The name of the topic that the permission grants access to.
-TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
+	// The name of the topic that the permission grants access to.
+	TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"`
 }
-
 type KafkaUserPermissionObservation struct {
+	// Set of hosts to which this permission grants access.
+	// +listType=set
+	AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"`
-// Set of hosts, to which this permission grants access to.
-// +listType=set
-AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"`
+	// The role type to grant to the topic.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
-// The role type to grant to the topic.
-Role *string `json:"role,omitempty" tf:"role,omitempty"` - -// The name of the topic that the permission grants access to. -TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` + // The name of the topic that the permission grants access to. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` } - type KafkaUserPermissionParameters struct { + // Set of hosts, to which this permission grants access to. + // +kubebuilder:validation:Optional + // +listType=set + AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` -// Set of hosts, to which this permission grants access to. -// +kubebuilder:validation:Optional -// +listType=set -AllowHosts []*string `json:"allowHosts,omitempty" tf:"allow_hosts,omitempty"` + // The role type to grant to the topic. + // +kubebuilder:validation:Optional + Role *string `json:"role" tf:"role,omitempty"` -// The role type to grant to the topic. -// +kubebuilder:validation:Optional -Role *string `json:"role" tf:"role,omitempty"` - -// The name of the topic that the permission grants access to. -// +kubebuilder:validation:Optional -TopicName *string `json:"topicName" tf:"topic_name,omitempty"` + // The name of the topic that the permission grants access to. + // +kubebuilder:validation:Optional + TopicName *string `json:"topicName" tf:"topic_name,omitempty"` } // KafkaUserSpec defines the desired state of KafkaUser type KafkaUserSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider KafkaUserParameters `json:"forProvider"` + ForProvider KafkaUserParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -145,20 +127,19 @@ type KafkaUserSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider KafkaUserInitParameters `json:"initProvider,omitempty"` + InitProvider KafkaUserInitParameters `json:"initProvider,omitempty"` } // KafkaUserStatus defines the observed state of KafkaUser. type KafkaUserStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider KafkaUserObservation `json:"atProvider,omitempty"` + AtProvider KafkaUserObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // KafkaUser is the Schema for the KafkaUsers API. Manages a user of a Kafka cluster within Yandex.Cloud. 
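// The printer-column markers below surface the Synced and Ready conditions in
// kubectl get output, and the CEL XValidation rules on the struct require
// spec.forProvider.name (or spec.initProvider.name) and
// spec.forProvider.passwordSecretRef to be set whenever the management
// policies include '*', 'Create', or 'Update'.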
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -168,10 +149,10 @@ type KafkaUserStatus struct { type KafkaUser struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" - Spec KafkaUserSpec `json:"spec"` - Status KafkaUserStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" + Spec KafkaUserSpec `json:"spec"` + Status KafkaUserStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mongodbcluster_terraformed.go b/apis/mdb/v1alpha1/zz_mongodbcluster_terraformed.go new file mode 100755 index 0000000..53331df --- /dev/null +++ b/apis/mdb/v1alpha1/zz_mongodbcluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
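+//
+// This file wires MongodbCluster into upjet's Terraformed interface: it maps
+// the resource to its Terraform type, (un)marshals parameters and
+// observations through the Terraform JSON parser, and defines the merge and
+// late-initialization behavior used by the controller. An illustrative
+// sketch (not generated code) of how a controller consumes these methods,
+// with a hypothetical "example-id" value:
+//
+//	var tr MongodbCluster
+//	_ = tr.SetObservation(map[string]any{"id": "example-id"}) // hydrates status.atProvider
+//	_ = tr.GetID()                                            // returns "example-id"
+//	_, _ = tr.GetMergedParameters(true)                       // forProvider gaps filled from initProvider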
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this MongodbCluster
+func (mg *MongodbCluster) GetTerraformResourceType() string {
+	return "yandex_mdb_mongodb_cluster"
+}
+
+// GetConnectionDetailsMapping for this MongodbCluster
+func (tr *MongodbCluster) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"user[*].password": "user[*].passwordSecretRef"}
+}
+
+// GetObservation of this MongodbCluster
+func (tr *MongodbCluster) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this MongodbCluster
+func (tr *MongodbCluster) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this MongodbCluster
+func (tr *MongodbCluster) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this MongodbCluster
+func (tr *MongodbCluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this MongodbCluster
+func (tr *MongodbCluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this MongodbCluster
+func (tr *MongodbCluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this MongodbCluster
+func (tr *MongodbCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this MongodbCluster using its observed tfState.
+// It returns true if there are any spec changes for the resource.
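+// Late-initialization copies values observed in the Terraform state (attrs)
+// into spec.forProvider fields that are still unset. Observed attributes that
+// marshal to their JSON zero value are skipped by the
+// WithZeroValueJSONOmitEmptyFilter option below, so empty state values are
+// not written into the spec.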
+func (tr *MongodbCluster) LateInitialize(attrs []byte) (bool, error) { + params := &MongodbClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MongodbCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_mongodbcluster_types.go b/apis/mdb/v1alpha1/zz_mongodbcluster_types.go index 6c3567a..71f0626 100755 --- a/apis/mdb/v1alpha1/zz_mongodbcluster_types.go +++ b/apis/mdb/v1alpha1/zz_mongodbcluster_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,1926 +7,1695 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AuditLogInitParameters struct { + // Configuration of the audit log filter in JSON format. For more information see auditLog.filter description in the official documentation. Available only in enterprise edition. + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` -// Configuration of the audit log filter in JSON format. For more information see auditLog.filter description in the official documentation. Available only in enterprise edition. -Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` - -// Specifies if a node allows runtime configuration of audit filters and the auditAuthorizationSuccess variable. For more information see auditLog.runtimeConfiguration description in the official documentation. Available only in enterprise edition. -RuntimeConfiguration *bool `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` + // Specifies if a node allows runtime configuration of audit filters and the auditAuthorizationSuccess variable. For more information see auditLog.runtimeConfiguration description in the official documentation. Available only in enterprise edition. + RuntimeConfiguration *bool `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` } - type AuditLogObservation struct { + // Configuration of the audit log filter in JSON format. For more information see auditLog.filter description in the official documentation. Available only in enterprise edition. + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` -// Configuration of the audit log filter in JSON format. For more information see auditLog.filter description in the official documentation. Available only in enterprise edition. -Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` - -// Specifies if a node allows runtime configuration of audit filters and the auditAuthorizationSuccess variable. For more information see auditLog.runtimeConfiguration description in the official documentation. Available only in enterprise edition. -RuntimeConfiguration *bool `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` + // Specifies if a node allows runtime configuration of audit filters and the auditAuthorizationSuccess variable. For more information see auditLog.runtimeConfiguration description in the official documentation. Available only in enterprise edition. 
+ RuntimeConfiguration *bool `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` } - type AuditLogParameters struct { + // Configuration of the audit log filter in JSON format. For more information see auditLog.filter description in the official documentation. Available only in enterprise edition. + // +kubebuilder:validation:Optional + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` -// Configuration of the audit log filter in JSON format. For more information see auditLog.filter description in the official documentation. Available only in enterprise edition. -// +kubebuilder:validation:Optional -Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` - -// Specifies if a node allows runtime configuration of audit filters and the auditAuthorizationSuccess variable. For more information see auditLog.runtimeConfiguration description in the official documentation. Available only in enterprise edition. -// +kubebuilder:validation:Optional -RuntimeConfiguration *bool `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` + // Specifies if a node allows runtime configuration of audit filters and the auditAuthorizationSuccess variable. For more information see auditLog.runtimeConfiguration description in the official documentation. Available only in enterprise edition. + // +kubebuilder:validation:Optional + RuntimeConfiguration *bool `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` } - type ClusterConfigAccessInitParameters struct { + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` } - type ClusterConfigAccessObservation struct { + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` } - type ClusterConfigAccessParameters struct { + // Allow access for Yandex DataLens. + // +kubebuilder:validation:Optional + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -// +kubebuilder:validation:Optional -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -// +kubebuilder:validation:Optional -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + // +kubebuilder:validation:Optional + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` } - type ClusterConfigBackupWindowStartInitParameters struct { + // The hour at which backup will be started. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. 
-Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
+	// The minute at which backup will be started.
+	Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
}

-
type ClusterConfigBackupWindowStartObservation struct {

+	// The hour at which backup will be started.
+	Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`

-// The hour at which backup will be started.
-Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`
-
-// The minute at which backup will be started.
-Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
+	// The minute at which backup will be started.
+	Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
}

-
type ClusterConfigBackupWindowStartParameters struct {

+	// The hour at which backup will be started.
+	// +kubebuilder:validation:Optional
+	Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`

-// The hour at which backup will be started.
-// +kubebuilder:validation:Optional
-Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`
-
-// The minute at which backup will be started.
-// +kubebuilder:validation:Optional
-Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
+	// The minute at which backup will be started.
+	// +kubebuilder:validation:Optional
+	Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
}

-
type ClusterConfigInitParameters struct {

+	// Access policy to the MongoDB cluster. The structure is documented below.
+	Access []ClusterConfigAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"`

-// Access policy to the MongoDB cluster. The structure is documented below.
-Access []ClusterConfigAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"`
-
-// Retain period of automatically created backup in days.
-BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+	// Retain period of automatically created backup in days.
+	BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`

-// Time to start the daily backup, in the UTC timezone. The structure is documented below.
-BackupWindowStart []ClusterConfigBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+	// Time to start the daily backup, in the UTC timezone. The structure is documented below.
+	BackupWindowStart []ClusterConfigBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`

-// Feature compatibility version of MongoDB. If not provided version is taken. Can be either 6.0, 5.0, 4.4 and 4.2.
-FeatureCompatibilityVersion *string `json:"featureCompatibilityVersion,omitempty" tf:"feature_compatibility_version,omitempty"`
+	// Feature compatibility version of MongoDB. If not provided, the cluster version is used. Can be 6.0, 5.0, 4.4, or 4.2.
+	FeatureCompatibilityVersion *string `json:"featureCompatibilityVersion,omitempty" tf:"feature_compatibility_version,omitempty"`

-// Configuration of the mongocfg service. The structure is documented below.
-Mongocfg []MongocfgInitParameters `json:"mongocfg,omitempty" tf:"mongocfg,omitempty"`
+	// Configuration of the mongocfg service. The structure is documented below.
+	Mongocfg []MongocfgInitParameters `json:"mongocfg,omitempty" tf:"mongocfg,omitempty"`

-// Configuration of the mongod service. The structure is documented below.
-Mongod []MongodInitParameters `json:"mongod,omitempty" tf:"mongod,omitempty"`
+	// Configuration of the mongod service. The structure is documented below.
+	Mongod []MongodInitParameters `json:"mongod,omitempty" tf:"mongod,omitempty"`

-// Configuration of the mongos service. The structure is documented below.
-Mongos []MongosInitParameters `json:"mongos,omitempty" tf:"mongos,omitempty"`
+	// Configuration of the mongos service. The structure is documented below.
+	Mongos []MongosInitParameters `json:"mongos,omitempty" tf:"mongos,omitempty"`

-// Performance diagnostics to the MongoDB cluster. The structure is documented below.
-PerformanceDiagnostics []PerformanceDiagnosticsInitParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
+	// Performance diagnostics to the MongoDB cluster. The structure is documented below.
+	PerformanceDiagnostics []PerformanceDiagnosticsInitParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`

-// Version of the MongoDB server software. Can be either 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, 6.0 and 6.0-enterprise.
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// Version of the MongoDB server software. Can be 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, 6.0, or 6.0-enterprise.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
}

-
type ClusterConfigObservation struct {

+	// Access policy to the MongoDB cluster. The structure is documented below.
+	Access []ClusterConfigAccessObservation `json:"access,omitempty" tf:"access,omitempty"`

-// Access policy to the MongoDB cluster. The structure is documented below.
-Access []ClusterConfigAccessObservation `json:"access,omitempty" tf:"access,omitempty"`
-
-// Retain period of automatically created backup in days.
-BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+	// Retain period of automatically created backup in days.
+	BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`

-// Time to start the daily backup, in the UTC timezone. The structure is documented below.
-BackupWindowStart []ClusterConfigBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+	// Time to start the daily backup, in the UTC timezone. The structure is documented below.
+	BackupWindowStart []ClusterConfigBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`

-// Feature compatibility version of MongoDB. If not provided version is taken. Can be either 6.0, 5.0, 4.4 and 4.2.
-FeatureCompatibilityVersion *string `json:"featureCompatibilityVersion,omitempty" tf:"feature_compatibility_version,omitempty"`
+	// Feature compatibility version of MongoDB. If not provided, the cluster version is used. Can be 6.0, 5.0, 4.4, or 4.2.
+	FeatureCompatibilityVersion *string `json:"featureCompatibilityVersion,omitempty" tf:"feature_compatibility_version,omitempty"`

-// Configuration of the mongocfg service. The structure is documented below.
-Mongocfg []MongocfgObservation `json:"mongocfg,omitempty" tf:"mongocfg,omitempty"`
+	// Configuration of the mongocfg service. The structure is documented below.
+	Mongocfg []MongocfgObservation `json:"mongocfg,omitempty" tf:"mongocfg,omitempty"`

-// Configuration of the mongod service. The structure is documented below.
-Mongod []MongodObservation `json:"mongod,omitempty" tf:"mongod,omitempty"`
+	// Configuration of the mongod service. The structure is documented below.
+	Mongod []MongodObservation `json:"mongod,omitempty" tf:"mongod,omitempty"`

-// Configuration of the mongos service. The structure is documented below.
-Mongos []MongosObservation `json:"mongos,omitempty" tf:"mongos,omitempty"`
+	// Configuration of the mongos service. The structure is documented below.
+	Mongos []MongosObservation `json:"mongos,omitempty" tf:"mongos,omitempty"`

-// Performance diagnostics to the MongoDB cluster. The structure is documented below.
-PerformanceDiagnostics []PerformanceDiagnosticsObservation `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
+	// Performance diagnostics to the MongoDB cluster. The structure is documented below.
+	PerformanceDiagnostics []PerformanceDiagnosticsObservation `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`

-// Version of the MongoDB server software. Can be either 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, 6.0 and 6.0-enterprise.
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// Version of the MongoDB server software. Can be 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, 6.0, or 6.0-enterprise.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
}

-
type ClusterConfigParameters struct {

+	// Access policy to the MongoDB cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Access []ClusterConfigAccessParameters `json:"access,omitempty" tf:"access,omitempty"`

-// Access policy to the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Access []ClusterConfigAccessParameters `json:"access,omitempty" tf:"access,omitempty"`
-
-// Retain period of automatically created backup in days.
-// +kubebuilder:validation:Optional
-BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+	// Retain period of automatically created backup in days.
+	// +kubebuilder:validation:Optional
+	BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`

-// Time to start the daily backup, in the UTC timezone. The structure is documented below.
-// +kubebuilder:validation:Optional
-BackupWindowStart []ClusterConfigBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+	// Time to start the daily backup, in the UTC timezone. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	BackupWindowStart []ClusterConfigBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`

-// Feature compatibility version of MongoDB. If not provided version is taken. Can be either 6.0, 5.0, 4.4 and 4.2.
-// +kubebuilder:validation:Optional
-FeatureCompatibilityVersion *string `json:"featureCompatibilityVersion,omitempty" tf:"feature_compatibility_version,omitempty"`
+	// Feature compatibility version of MongoDB. If not provided, the cluster version is used. Can be 6.0, 5.0, 4.4, or 4.2.
+	// +kubebuilder:validation:Optional
+	FeatureCompatibilityVersion *string `json:"featureCompatibilityVersion,omitempty" tf:"feature_compatibility_version,omitempty"`

-// Configuration of the mongocfg service. The structure is documented below.
-// +kubebuilder:validation:Optional
-Mongocfg []MongocfgParameters `json:"mongocfg,omitempty" tf:"mongocfg,omitempty"`
+	// Configuration of the mongocfg service. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Mongocfg []MongocfgParameters `json:"mongocfg,omitempty" tf:"mongocfg,omitempty"`

-// Configuration of the mongod service. The structure is documented below.
-// +kubebuilder:validation:Optional
-Mongod []MongodParameters `json:"mongod,omitempty" tf:"mongod,omitempty"`
+	// Configuration of the mongod service. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Mongod []MongodParameters `json:"mongod,omitempty" tf:"mongod,omitempty"`

-// Configuration of the mongos service. The structure is documented below.
-// +kubebuilder:validation:Optional
-Mongos []MongosParameters `json:"mongos,omitempty" tf:"mongos,omitempty"`
+	// Configuration of the mongos service. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Mongos []MongosParameters `json:"mongos,omitempty" tf:"mongos,omitempty"`

-// Performance diagnostics to the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-PerformanceDiagnostics []PerformanceDiagnosticsParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
+	// Performance diagnostics to the MongoDB cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	PerformanceDiagnostics []PerformanceDiagnosticsParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`

-// Version of the MongoDB server software. Can be either 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, 6.0 and 6.0-enterprise.
-// +kubebuilder:validation:Optional
-Version *string `json:"version" tf:"version,omitempty"`
+	// Version of the MongoDB server software. Can be 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, 6.0, or 6.0-enterprise.
+	// +kubebuilder:validation:Optional
+	Version *string `json:"version" tf:"version,omitempty"`
}

-
type DiskSizeAutoscalingMongocfgInitParameters struct {

+	// Limit of disk size after autoscaling (GiB).
+	DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`

-// Limit of disk size after autoscaling (GiB).
-DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`
+	// Immediate autoscaling disk usage (percent).
+	EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`

-// Immediate autoscaling disk usage (percent).
-EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
-
-// Maintenance window autoscaling disk usage (percent).
-PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
+	// Maintenance window autoscaling disk usage (percent).
+	PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
}

-
type DiskSizeAutoscalingMongocfgObservation struct {

+	// Limit of disk size after autoscaling (GiB).
+	DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`

-// Limit of disk size after autoscaling (GiB).
-DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"`
+	// Immediate autoscaling disk usage (percent).
+ EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Immediate autoscaling disk usage (percent). -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` - -// Maintenance window autoscaling disk usage (percent). -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongocfgParameters struct { + // Limit of disk size after autoscaling (GiB). + // +kubebuilder:validation:Optional + DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -// +kubebuilder:validation:Optional -DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` + // Immediate autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Immediate autoscaling disk usage (percent). -// +kubebuilder:validation:Optional -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` - -// Maintenance window autoscaling disk usage (percent). -// +kubebuilder:validation:Optional -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongodInitParameters struct { + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Immediate autoscaling disk usage (percent). -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` - -// Maintenance window autoscaling disk usage (percent). -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongodObservation struct { + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Immediate autoscaling disk usage (percent). 
-EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` - -// Maintenance window autoscaling disk usage (percent). -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongodParameters struct { + // Limit of disk size after autoscaling (GiB). + // +kubebuilder:validation:Optional + DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -// +kubebuilder:validation:Optional -DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` + // Immediate autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Immediate autoscaling disk usage (percent). -// +kubebuilder:validation:Optional -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` - -// Maintenance window autoscaling disk usage (percent). -// +kubebuilder:validation:Optional -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongoinfraInitParameters struct { + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Immediate autoscaling disk usage (percent). -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` - -// Maintenance window autoscaling disk usage (percent). -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongoinfraObservation struct { + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` - -// Immediate autoscaling disk usage (percent). -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Maintenance window autoscaling disk usage (percent). 
-PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongoinfraParameters struct { + // Limit of disk size after autoscaling (GiB). + // +kubebuilder:validation:Optional + DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -// +kubebuilder:validation:Optional -DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` - -// Immediate autoscaling disk usage (percent). -// +kubebuilder:validation:Optional -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + // Immediate autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Maintenance window autoscaling disk usage (percent). -// +kubebuilder:validation:Optional -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongosInitParameters struct { + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` - -// Immediate autoscaling disk usage (percent). -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Maintenance window autoscaling disk usage (percent). -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` } - type DiskSizeAutoscalingMongosObservation struct { + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` -// Limit of disk size after autoscaling (GiB). -DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` - -// Immediate autoscaling disk usage (percent). -EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` -// Maintenance window autoscaling disk usage (percent). -PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` + // Maintenance window autoscaling disk usage (percent). 
+	PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
}

-
type DiskSizeAutoscalingMongosParameters struct {

+	// Limit of disk size after autoscaling (GiB).
+	// +kubebuilder:validation:Optional
+	DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"`

-// Limit of disk size after autoscaling (GiB).
-// +kubebuilder:validation:Optional
-DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"`
+	// Immediate autoscaling disk usage (percent).
+	// +kubebuilder:validation:Optional
+	EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`

-// Immediate autoscaling disk usage (percent).
-// +kubebuilder:validation:Optional
-EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"`
-
-// Maintenance window autoscaling disk usage (percent).
-// +kubebuilder:validation:Optional
-PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
+	// Maintenance window autoscaling disk usage (percent).
+	// +kubebuilder:validation:Optional
+	PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"`
}

-
type HostParametersInitParameters struct {

+	// Should this host be hidden in replicaset. Can be either true or false. For more information see the official documentation
+	Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"`

-// Should this host be hidden in replicaset. Can be either true of false. For more information see the official documentation
-Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"`
+	// A floating point number that indicates the relative likelihood of a replica set member becoming the primary. For more information see the official documentation
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

-// A floating point number that indicates the relative likelihood of a replica set member to become the primary. For more information see the official documentation
-Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+	// The number of seconds "behind" the primary that this replica set member should "lag". For more information see the official documentation
+	SecondaryDelaySecs *float64 `json:"secondaryDelaySecs,omitempty" tf:"secondary_delay_secs,omitempty"`

-// The number of seconds "behind" the primary that this replica set member should "lag". For more information see the official documentation
-SecondaryDelaySecs *float64 `json:"secondaryDelaySecs,omitempty" tf:"secondary_delay_secs,omitempty"`
-
-// A set of key/value pairs to assign for the replica set member. For more information see the official documentation
-// +mapType=granular
-Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+	// A set of key/value pairs to assign for the replica set member. For more information see the official documentation
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
}

-
type HostParametersObservation struct {

+	// Should this host be hidden in replicaset. Can be either true or false. For more information see the official documentation
+	Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"`

-// Should this host be hidden in replicaset. Can be either true of false. For more information see the official documentation
-Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"`
+	// A floating point number that indicates the relative likelihood of a replica set member becoming the primary. For more information see the official documentation
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

-// A floating point number that indicates the relative likelihood of a replica set member to become the primary. For more information see the official documentation
-Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+	// The number of seconds "behind" the primary that this replica set member should "lag". For more information see the official documentation
+	SecondaryDelaySecs *float64 `json:"secondaryDelaySecs,omitempty" tf:"secondary_delay_secs,omitempty"`

-// The number of seconds "behind" the primary that this replica set member should "lag". For more information see the official documentation
-SecondaryDelaySecs *float64 `json:"secondaryDelaySecs,omitempty" tf:"secondary_delay_secs,omitempty"`
-
-// A set of key/value pairs to assign for the replica set member. For more information see the official documentation
-// +mapType=granular
-Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+	// A set of key/value pairs to assign for the replica set member. For more information see the official documentation
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
}

-
type HostParametersParameters struct {

+	// Should this host be hidden in replicaset. Can be either true or false. For more information see the official documentation
+	// +kubebuilder:validation:Optional
+	Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"`

-// Should this host be hidden in replicaset. Can be either true of false. For more information see the official documentation
-// +kubebuilder:validation:Optional
-Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"`
-
-// A floating point number that indicates the relative likelihood of a replica set member to become the primary. For more information see the official documentation
-// +kubebuilder:validation:Optional
-Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+	// A floating point number that indicates the relative likelihood of a replica set member becoming the primary. For more information see the official documentation
+	// +kubebuilder:validation:Optional
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

-// The number of seconds "behind" the primary that this replica set member should "lag". For more information see the official documentation
-// +kubebuilder:validation:Optional
-SecondaryDelaySecs *float64 `json:"secondaryDelaySecs,omitempty" tf:"secondary_delay_secs,omitempty"`
+	// The number of seconds "behind" the primary that this replica set member should "lag". For more information see the official documentation
+	// +kubebuilder:validation:Optional
+	SecondaryDelaySecs *float64 `json:"secondaryDelaySecs,omitempty" tf:"secondary_delay_secs,omitempty"`

-// A set of key/value pairs to assign for the replica set member.
For more information see the official documentation + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` } - type JournalInitParameters struct { - -// The maximum amount of time in milliseconds that the mongod process allows between journal operations. For more information, see the storage.journal.commitIntervalMs description in the official documentation. -CommitInterval *float64 `json:"commitInterval,omitempty" tf:"commit_interval,omitempty"` + // The maximum amount of time in milliseconds that the mongod process allows between journal operations. For more information, see the storage.journal.commitIntervalMs description in the official documentation. + CommitInterval *float64 `json:"commitInterval,omitempty" tf:"commit_interval,omitempty"` } - type JournalObservation struct { - -// The maximum amount of time in milliseconds that the mongod process allows between journal operations. For more information, see the storage.journal.commitIntervalMs description in the official documentation. -CommitInterval *float64 `json:"commitInterval,omitempty" tf:"commit_interval,omitempty"` + // The maximum amount of time in milliseconds that the mongod process allows between journal operations. For more information, see the storage.journal.commitIntervalMs description in the official documentation. + CommitInterval *float64 `json:"commitInterval,omitempty" tf:"commit_interval,omitempty"` } - type JournalParameters struct { - -// The maximum amount of time in milliseconds that the mongod process allows between journal operations. For more information, see the storage.journal.commitIntervalMs description in the official documentation. -// +kubebuilder:validation:Optional -CommitInterval *float64 `json:"commitInterval,omitempty" tf:"commit_interval,omitempty"` + // The maximum amount of time in milliseconds that the mongod process allows between journal operations. For more information, see the storage.journal.commitIntervalMs description in the official documentation. + // +kubebuilder:validation:Optional + CommitInterval *float64 `json:"commitInterval,omitempty" tf:"commit_interval,omitempty"` } - type KmipInitParameters struct { + // String containing the client certificate used for authenticating MongoDB to the KMIP server. For more information see security.kmip.clientCertificateFile description in the official documentation. + ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"` -// String containing the client certificate used for authenticating MongoDB to the KMIP server. For more information see security.kmip.clientCertificateFile description in the official documentation. -ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"` - -// Unique KMIP identifier for an existing key within the KMIP server. For more information see security.kmip.keyIdentifier description in the official documentation. -KeyIdentifier *string `json:"keyIdentifier,omitempty" tf:"key_identifier,omitempty"` + // Unique KMIP identifier for an existing key within the KMIP server. For more information see security.kmip.keyIdentifier description in the official documentation. + KeyIdentifier *string `json:"keyIdentifier,omitempty" tf:"key_identifier,omitempty"` -// Port number to use to communicate with the KMIP server. Default: 5696 For more information see security.kmip.port description in the official documentation. 
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// Port number to use to communicate with the KMIP server. Default: 5696. For more information see security.kmip.port description in the official documentation.
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

-// Path to CA File. Used for validating secure client connection to KMIP server. For more information see security.kmip.serverCAFile description in the official documentation.
-ServerCA *string `json:"serverCa,omitempty" tf:"server_ca,omitempty"`
+	// Path to CA File. Used for validating secure client connection to KMIP server. For more information see security.kmip.serverCAFile description in the official documentation.
+	ServerCA *string `json:"serverCa,omitempty" tf:"server_ca,omitempty"`

-// Hostname or IP address of the KMIP server to connect to. For more information see security.kmip.serverName description in the official documentation.
-ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
+	// Hostname or IP address of the KMIP server to connect to. For more information see security.kmip.serverName description in the official documentation.
+	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
}

-
type KmipObservation struct {

+	// String containing the client certificate used for authenticating MongoDB to the KMIP server. For more information see security.kmip.clientCertificateFile description in the official documentation.
+	ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"`

-// String containing the client certificate used for authenticating MongoDB to the KMIP server. For more information see security.kmip.clientCertificateFile description in the official documentation.
-ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"`
-
-// Unique KMIP identifier for an existing key within the KMIP server. For more information see security.kmip.keyIdentifier description in the official documentation.
-KeyIdentifier *string `json:"keyIdentifier,omitempty" tf:"key_identifier,omitempty"`
+	// Unique KMIP identifier for an existing key within the KMIP server. For more information see security.kmip.keyIdentifier description in the official documentation.
+	KeyIdentifier *string `json:"keyIdentifier,omitempty" tf:"key_identifier,omitempty"`

-// Port number to use to communicate with the KMIP server. Default: 5696 For more information see security.kmip.port description in the official documentation.
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// Port number to use to communicate with the KMIP server. Default: 5696. For more information see security.kmip.port description in the official documentation.
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

-// Path to CA File. Used for validating secure client connection to KMIP server. For more information see security.kmip.serverCAFile description in the official documentation.
-ServerCA *string `json:"serverCa,omitempty" tf:"server_ca,omitempty"`
+	// Path to CA File. Used for validating secure client connection to KMIP server. For more information see security.kmip.serverCAFile description in the official documentation.
+	ServerCA *string `json:"serverCa,omitempty" tf:"server_ca,omitempty"`

-// Hostname or IP address of the KMIP server to connect to. For more information see security.kmip.serverName description in the official documentation.
-ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
+	// Hostname or IP address of the KMIP server to connect to. For more information see security.kmip.serverName description in the official documentation.
+	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
}

-
type KmipParameters struct {

+	// String containing the client certificate used for authenticating MongoDB to the KMIP server. For more information see security.kmip.clientCertificateFile description in the official documentation.
+	// +kubebuilder:validation:Optional
+	ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"`

-// String containing the client certificate used for authenticating MongoDB to the KMIP server. For more information see security.kmip.clientCertificateFile description in the official documentation.
-// +kubebuilder:validation:Optional
-ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"`
-
-// Unique KMIP identifier for an existing key within the KMIP server. For more information see security.kmip.keyIdentifier description in the official documentation.
-// +kubebuilder:validation:Optional
-KeyIdentifier *string `json:"keyIdentifier,omitempty" tf:"key_identifier,omitempty"`
+	// Unique KMIP identifier for an existing key within the KMIP server. For more information see security.kmip.keyIdentifier description in the official documentation.
+	// +kubebuilder:validation:Optional
+	KeyIdentifier *string `json:"keyIdentifier,omitempty" tf:"key_identifier,omitempty"`

-// Port number to use to communicate with the KMIP server. Default: 5696 For more information see security.kmip.port description in the official documentation.
-// +kubebuilder:validation:Optional
-Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+	// Port number to use to communicate with the KMIP server. Default: 5696. For more information see security.kmip.port description in the official documentation.
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

-// Path to CA File. Used for validating secure client connection to KMIP server. For more information see security.kmip.serverCAFile description in the official documentation.
-// +kubebuilder:validation:Optional
-ServerCA *string `json:"serverCa,omitempty" tf:"server_ca,omitempty"`
+	// Path to CA File. Used for validating secure client connection to KMIP server. For more information see security.kmip.serverCAFile description in the official documentation.
+	// +kubebuilder:validation:Optional
+	ServerCA *string `json:"serverCa,omitempty" tf:"server_ca,omitempty"`

-// Hostname or IP address of the KMIP server to connect to. For more information see security.kmip.serverName description in the official documentation.
-// +kubebuilder:validation:Optional
-ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
+	// Hostname or IP address of the KMIP server to connect to. For more information see security.kmip.serverName description in the official documentation.
+	// +kubebuilder:validation:Optional
+	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
}

-
type MongocfgInitParameters struct {

+	// A set of network settings (see the net option). The structure is documented below.
+	Net []NetInitParameters `json:"net,omitempty" tf:"net,omitempty"`

-// A set of network settings (see the net option). The structure is documented below.

-
 type MongocfgInitParameters struct {
+	// A set of network settings (see the net option). The structure is documented below.
+	Net []NetInitParameters `json:"net,omitempty" tf:"net,omitempty"`
-// A set of network settings (see the net option). The structure is documented below.
-Net []NetInitParameters `json:"net,omitempty" tf:"net,omitempty"`
-
-// A set of profiling settings (see the operationProfiling option). The structure is documented below.
-OperationProfiling []OperationProfilingInitParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
+	// A set of profiling settings (see the operationProfiling option). The structure is documented below.
+	OperationProfiling []OperationProfilingInitParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-// A set of storage settings (see the storage option). The structure is documented below.
-Storage []StorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"`
+	// A set of storage settings (see the storage option). The structure is documented below.
+	Storage []StorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"`
 }

-
 type MongocfgObservation struct {
+	// A set of network settings (see the net option). The structure is documented below.
+	Net []NetObservation `json:"net,omitempty" tf:"net,omitempty"`
-// A set of network settings (see the net option). The structure is documented below.
-Net []NetObservation `json:"net,omitempty" tf:"net,omitempty"`
+	// A set of profiling settings (see the operationProfiling option). The structure is documented below.
+	OperationProfiling []OperationProfilingObservation `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-// A set of profiling settings (see the operationProfiling option). The structure is documented below.
-OperationProfiling []OperationProfilingObservation `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-
-// A set of storage settings (see the storage option). The structure is documented below.
-Storage []StorageObservation `json:"storage,omitempty" tf:"storage,omitempty"`
+	// A set of storage settings (see the storage option). The structure is documented below.
+	Storage []StorageObservation `json:"storage,omitempty" tf:"storage,omitempty"`
 }

-
 type MongocfgParameters struct {
+	// A set of network settings (see the net option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Net []NetParameters `json:"net,omitempty" tf:"net,omitempty"`
-// A set of network settings (see the net option). The structure is documented below.
-// +kubebuilder:validation:Optional
-Net []NetParameters `json:"net,omitempty" tf:"net,omitempty"`
+	// A set of profiling settings (see the operationProfiling option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	OperationProfiling []OperationProfilingParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-// A set of profiling settings (see the operationProfiling option). The structure is documented below.
-// +kubebuilder:validation:Optional
-OperationProfiling []OperationProfilingParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-
-// A set of storage settings (see the storage option). The structure is documented below.
-// +kubebuilder:validation:Optional
-Storage []StorageParameters `json:"storage,omitempty" tf:"storage,omitempty"`
+	// A set of storage settings (see the storage option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Storage []StorageParameters `json:"storage,omitempty" tf:"storage,omitempty"`
 }

-
 type MongodInitParameters struct {
+	// A set of audit log settings (see the auditLog option). The structure is documented below. Available only in enterprise edition.
+	AuditLog []AuditLogInitParameters `json:"auditLog,omitempty" tf:"audit_log,omitempty"`
-// A set of audit log settings (see the auditLog option). The structure is documented below. Available only in enterprise edition.
-AuditLog []AuditLogInitParameters `json:"auditLog,omitempty" tf:"audit_log,omitempty"`
+	// A set of network settings (see the net option). The structure is documented below.
+	Net []MongodNetInitParameters `json:"net,omitempty" tf:"net,omitempty"`
-// A set of network settings (see the net option). The structure is documented below.
-Net []MongodNetInitParameters `json:"net,omitempty" tf:"net,omitempty"`
+	// A set of profiling settings (see the operationProfiling option). The structure is documented below.
+	OperationProfiling []MongodOperationProfilingInitParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-// A set of profiling settings (see the operationProfiling option). The structure is documented below.
-OperationProfiling []MongodOperationProfilingInitParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
+	// A set of MongoDB Security settings (see the security option). The structure is documented below. Available only in enterprise edition.
+	Security []SecurityInitParameters `json:"security,omitempty" tf:"security,omitempty"`
-// A set of MongoDB Security settings (see the security option). The structure is documented below. Available only in enterprise edition.
-Security []SecurityInitParameters `json:"security,omitempty" tf:"security,omitempty"`
+	// A set of MongoDB Server Parameters (see the setParameter option). The structure is documented below.
+	SetParameter []SetParameterInitParameters `json:"setParameter,omitempty" tf:"set_parameter,omitempty"`
-// A set of MongoDB Server Parameters (see the setParameter option). The structure is documented below.
-SetParameter []SetParameterInitParameters `json:"setParameter,omitempty" tf:"set_parameter,omitempty"`
-
-// A set of storage settings (see the storage option). The structure is documented below.
-Storage []MongodStorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"`
+	// A set of storage settings (see the storage option). The structure is documented below.
+	Storage []MongodStorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"`
 }

-
 type MongodNetInitParameters struct {
+	// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
+	Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
-Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+	// The maximum number of simultaneous connections the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+	MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
 }

-
 type MongodNetObservation struct {
+	// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
+	Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
-Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+	// The maximum number of simultaneous connections the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+	MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
 }

-
 type MongodNetParameters struct {
+	// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
+	// +kubebuilder:validation:Optional
+	Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
-// +kubebuilder:validation:Optional
-Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-// +kubebuilder:validation:Optional
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+	// The maximum number of simultaneous connections the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+	// +kubebuilder:validation:Optional
+	MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
 }
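A usage sketch for the net block above (not part of the generated file; reuses the hypothetical ptr helper from the earlier sketch). Compressor order matters, and per the field doc "disabled" must be the only element to turn compression off:

// netExample shows the two knobs of MongodNetParameters: an ordered
// compressor preference list and a connection limit.
func netExample() MongodNetParameters {
	return MongodNetParameters{
		Compressors:            []*string{ptr("snappy"), ptr("zstd")},
		MaxIncomingConnections: ptr(1024.0), // illustrative limit, not a provider default
	}
}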

-
 type MongodObservation struct {
+	// A set of audit log settings (see the auditLog option). The structure is documented below. Available only in enterprise edition.
+	AuditLog []AuditLogObservation `json:"auditLog,omitempty" tf:"audit_log,omitempty"`
-// A set of audit log settings (see the auditLog option). The structure is documented below. Available only in enterprise edition.
-AuditLog []AuditLogObservation `json:"auditLog,omitempty" tf:"audit_log,omitempty"`
-
-// A set of network settings (see the net option). The structure is documented below.
-Net []MongodNetObservation `json:"net,omitempty" tf:"net,omitempty"`
+	// A set of network settings (see the net option). The structure is documented below.
+	Net []MongodNetObservation `json:"net,omitempty" tf:"net,omitempty"`
-// A set of profiling settings (see the operationProfiling option). The structure is documented below.
-OperationProfiling []MongodOperationProfilingObservation `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
+	// A set of profiling settings (see the operationProfiling option). The structure is documented below.
+	OperationProfiling []MongodOperationProfilingObservation `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-// A set of MongoDB Security settings (see the security option). The structure is documented below. Available only in enterprise edition.
-Security []SecurityObservation `json:"security,omitempty" tf:"security,omitempty"`
+	// A set of MongoDB Security settings (see the security option). The structure is documented below. Available only in enterprise edition.
+	Security []SecurityObservation `json:"security,omitempty" tf:"security,omitempty"`
-// A set of MongoDB Server Parameters (see the setParameter option). The structure is documented below.
-SetParameter []SetParameterObservation `json:"setParameter,omitempty" tf:"set_parameter,omitempty"`
+	// A set of MongoDB Server Parameters (see the setParameter option). The structure is documented below.
+	SetParameter []SetParameterObservation `json:"setParameter,omitempty" tf:"set_parameter,omitempty"`
-// A set of storage settings (see the storage option). The structure is documented below.
-Storage []MongodStorageObservation `json:"storage,omitempty" tf:"storage,omitempty"`
+	// A set of storage settings (see the storage option). The structure is documented below.
+	Storage []MongodStorageObservation `json:"storage,omitempty" tf:"storage,omitempty"`
 }

-
 type MongodOperationProfilingInitParameters struct {
+	// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
+	Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+	// The fraction of slow operations that should be profiled or logged. Accepts values between 0 and 1, inclusive. For more information, see the operationProfiling.slowOpSampleRate description in the official documentation.
+	SlowOpSampleRate *float64 `json:"slowOpSampleRate,omitempty" tf:"slow_op_sample_rate,omitempty"`
-// The fraction of slow operations that should be profiled or logged. Accepts values between 0 and 1, inclusive. For more information, see the operationProfiling.slowOpSampleRate description in the official documentation.
-SlowOpSampleRate *float64 `json:"slowOpSampleRate,omitempty" tf:"slow_op_sample_rate,omitempty"`
-
-// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
-SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
+	// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
+	SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
 }

-
 type MongodOperationProfilingObservation struct {
+	// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
+	Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+	// The fraction of slow operations that should be profiled or logged. Accepts values between 0 and 1, inclusive. For more information, see the operationProfiling.slowOpSampleRate description in the official documentation.
+	SlowOpSampleRate *float64 `json:"slowOpSampleRate,omitempty" tf:"slow_op_sample_rate,omitempty"`
-// The fraction of slow operations that should be profiled or logged. Accepts values between 0 and 1, inclusive. For more information, see the operationProfiling.slowOpSampleRate description in the official documentation.
-SlowOpSampleRate *float64 `json:"slowOpSampleRate,omitempty" tf:"slow_op_sample_rate,omitempty"`
-
-// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
-SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
+	// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
+	SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
 }

-
 type MongodOperationProfilingParameters struct {
+	// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
+	// +kubebuilder:validation:Optional
+	Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
-// +kubebuilder:validation:Optional
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
+	// The fraction of slow operations that should be profiled or logged. Accepts values between 0 and 1, inclusive. For more information, see the operationProfiling.slowOpSampleRate description in the official documentation.
+	// +kubebuilder:validation:Optional
+	SlowOpSampleRate *float64 `json:"slowOpSampleRate,omitempty" tf:"slow_op_sample_rate,omitempty"`
-// The fraction of slow operations that should be profiled or logged. Accepts values between 0 and 1, inclusive. For more information, see the operationProfiling.slowOpSampleRate description in the official documentation.
-// +kubebuilder:validation:Optional
-SlowOpSampleRate *float64 `json:"slowOpSampleRate,omitempty" tf:"slow_op_sample_rate,omitempty"`
-
-// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
-// +kubebuilder:validation:Optional
-SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
+	// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
+	// +kubebuilder:validation:Optional
+	SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
 }
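A sketch of the profiling block above (illustrative values, not part of the generated file; ptr is the hypothetical helper from the first sketch):

// profilingExample profiles only slow operations, samples half of them,
// and treats anything running longer than 300 ms as slow.
func profilingExample() MongodOperationProfilingParameters {
	return MongodOperationProfilingParameters{
		Mode:             ptr("slow_op"), // one of: off, slow_op, all
		SlowOpSampleRate: ptr(0.5),       // fraction in [0, 1]
		SlowOpThreshold:  ptr(300.0),     // milliseconds
	}
}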

-
 type MongodParameters struct {
+	// A set of audit log settings (see the auditLog option). The structure is documented below. Available only in enterprise edition.
+	// +kubebuilder:validation:Optional
+	AuditLog []AuditLogParameters `json:"auditLog,omitempty" tf:"audit_log,omitempty"`
-// A set of audit log settings (see the auditLog option). The structure is documented below. Available only in enterprise edition.
-// +kubebuilder:validation:Optional
-AuditLog []AuditLogParameters `json:"auditLog,omitempty" tf:"audit_log,omitempty"`
+	// A set of network settings (see the net option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Net []MongodNetParameters `json:"net,omitempty" tf:"net,omitempty"`
-// A set of network settings (see the net option). The structure is documented below.
-// +kubebuilder:validation:Optional
-Net []MongodNetParameters `json:"net,omitempty" tf:"net,omitempty"`
+	// A set of profiling settings (see the operationProfiling option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	OperationProfiling []MongodOperationProfilingParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
-// A set of profiling settings (see the operationProfiling option). The structure is documented below.
-// +kubebuilder:validation:Optional
-OperationProfiling []MongodOperationProfilingParameters `json:"operationProfiling,omitempty" tf:"operation_profiling,omitempty"`
+	// A set of MongoDB Security settings (see the security option). The structure is documented below. Available only in enterprise edition.
+	// +kubebuilder:validation:Optional
+	Security []SecurityParameters `json:"security,omitempty" tf:"security,omitempty"`
-// A set of MongoDB Security settings (see the security option). The structure is documented below. Available only in enterprise edition.
-// +kubebuilder:validation:Optional
-Security []SecurityParameters `json:"security,omitempty" tf:"security,omitempty"`
+	// A set of MongoDB Server Parameters (see the setParameter option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	SetParameter []SetParameterParameters `json:"setParameter,omitempty" tf:"set_parameter,omitempty"`
-// A set of MongoDB Server Parameters (see the setParameter option). The structure is documented below.
-// +kubebuilder:validation:Optional
-SetParameter []SetParameterParameters `json:"setParameter,omitempty" tf:"set_parameter,omitempty"`
-
-// A set of storage settings (see the storage option). The structure is documented below.
-// +kubebuilder:validation:Optional
-Storage []MongodStorageParameters `json:"storage,omitempty" tf:"storage,omitempty"`
+	// A set of storage settings (see the storage option). The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Storage []MongodStorageParameters `json:"storage,omitempty" tf:"storage,omitempty"`
 }

-
 type MongodStorageInitParameters struct {
+	// The durability journal to ensure data files remain valid and recoverable. The structure is documented below.
+	Journal []JournalInitParameters `json:"journal,omitempty" tf:"journal,omitempty"`
-// The durability journal to ensure data files remain valid and recoverable. The structure is documented below.
-Journal []JournalInitParameters `json:"journal,omitempty" tf:"journal,omitempty"`
-
-// The WiredTiger engine settings. (see the storage.wiredTiger option). These settings available only on mongod hosts. The structure is documented below.
-WiredTiger []StorageWiredTigerInitParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
+	// The WiredTiger engine settings (see the storage.wiredTiger option). These settings are available only on mongod hosts. The structure is documented below.
+	WiredTiger []StorageWiredTigerInitParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
 }

-
 type MongodStorageObservation struct {
+	// The durability journal to ensure data files remain valid and recoverable. The structure is documented below.
+	Journal []JournalObservation `json:"journal,omitempty" tf:"journal,omitempty"`
-// The durability journal to ensure data files remain valid and recoverable. The structure is documented below.
-Journal []JournalObservation `json:"journal,omitempty" tf:"journal,omitempty"`
-
-// The WiredTiger engine settings. (see the storage.wiredTiger option). These settings available only on mongod hosts. The structure is documented below.
-WiredTiger []StorageWiredTigerObservation `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
+	// The WiredTiger engine settings (see the storage.wiredTiger option). These settings are available only on mongod hosts. The structure is documented below.
+	WiredTiger []StorageWiredTigerObservation `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
 }

-
 type MongodStorageParameters struct {
+	// The durability journal to ensure data files remain valid and recoverable. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Journal []JournalParameters `json:"journal,omitempty" tf:"journal,omitempty"`
-// The durability journal to ensure data files remain valid and recoverable. The structure is documented below.
-// +kubebuilder:validation:Optional
-Journal []JournalParameters `json:"journal,omitempty" tf:"journal,omitempty"`
-
-// The WiredTiger engine settings. (see the storage.wiredTiger option). These settings available only on mongod hosts. The structure is documented below.
-// +kubebuilder:validation:Optional
-WiredTiger []StorageWiredTigerParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
+	// The WiredTiger engine settings (see the storage.wiredTiger option). These settings are available only on mongod hosts. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	WiredTiger []StorageWiredTigerParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
 }

-
 type MongodbClusterDatabaseInitParameters struct {
-
-// The name of the database.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the database.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 }

-
 type MongodbClusterDatabaseObservation struct {
-
-// The name of the database.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The name of the database.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
 }

-
 type MongodbClusterDatabaseParameters struct {
-
-// The name of the database.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+	// The name of the database.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
 }

-
 type MongodbClusterHostInitParameters struct {
+	// Should this host have a public IP assigned. Can be either true or false.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-// Should this host have assigned public IP assigned. Can be either true or false.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-
-// The parameters of mongod host in replicaset.
-HostParameters []HostParametersInitParameters `json:"hostParameters,omitempty" tf:"host_parameters,omitempty"`
+	// The parameters of mongod host in replicaset.
+	HostParameters []HostParametersInitParameters `json:"hostParameters,omitempty" tf:"host_parameters,omitempty"`
-// The role of the cluster (either PRIMARY or SECONDARY).
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The role of the cluster (either PRIMARY or SECONDARY).
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
-// The name of the shard to which the host belongs. Only for sharded cluster.
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// The name of the shard to which the host belongs. Only for sharded cluster.
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
-// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-// type of mongo daemon which runs on this host (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of mongo daemon which runs on this host (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// The availability zone where the MongoDB host will be created. For more information see the official documentation.
-ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+	// The availability zone where the MongoDB host will be created. For more information see the official documentation.
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
 }

-
 type MongodbClusterHostObservation struct {
+	// Should this host have a public IP assigned. Can be either true or false.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-// Should this host have assigned public IP assigned. Can be either true or false.
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// (Computed) The health of the host.
+	Health *string `json:"health,omitempty" tf:"health,omitempty"`
-// (Computed) The health of the host.
-Health *string `json:"health,omitempty" tf:"health,omitempty"`
+	// The parameters of mongod host in replicaset.
+	HostParameters []HostParametersObservation `json:"hostParameters,omitempty" tf:"host_parameters,omitempty"`
-// The parameters of mongod host in replicaset.
-HostParameters []HostParametersObservation `json:"hostParameters,omitempty" tf:"host_parameters,omitempty"`
+	// (Computed) The fully qualified domain name of the host. Computed on server side.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// (Computed) The fully qualified domain name of the host. Computed on server side.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The role of the cluster (either PRIMARY or SECONDARY).
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
-// The role of the cluster (either PRIMARY or SECONDARY).
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The name of the shard to which the host belongs. Only for sharded cluster.
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
-// The name of the shard to which the host belongs. Only for sharded cluster.
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Type of mongo daemon which runs on this host (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// type of mongo daemon which runs on this host (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
-
-// The availability zone where the MongoDB host will be created. For more information see the official documentation.
-ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+	// The availability zone where the MongoDB host will be created. For more information see the official documentation.
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
 }

-
 type MongodbClusterHostParameters struct {
+	// Should this host have a public IP assigned. Can be either true or false.
+	// +kubebuilder:validation:Optional
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
-// Should this host have assigned public IP assigned. Can be either true or false.
-// +kubebuilder:validation:Optional
-AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+	// The parameters of mongod host in replicaset.
+	// +kubebuilder:validation:Optional
+	HostParameters []HostParametersParameters `json:"hostParameters,omitempty" tf:"host_parameters,omitempty"`
-// The parameters of mongod host in replicaset.
-// +kubebuilder:validation:Optional
-HostParameters []HostParametersParameters `json:"hostParameters,omitempty" tf:"host_parameters,omitempty"`
+	// The role of the cluster (either PRIMARY or SECONDARY).
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
-// The role of the cluster (either PRIMARY or SECONDARY).
-// +kubebuilder:validation:Optional
-Role *string `json:"role,omitempty" tf:"role,omitempty"`
+	// The name of the shard to which the host belongs. Only for sharded cluster.
+	// +kubebuilder:validation:Optional
+	ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
-// The name of the shard to which the host belongs. Only for sharded cluster.
-// +kubebuilder:validation:Optional
-ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"`
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
-// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
-// +kubebuilder:validation:Optional
-SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
-// Reference to a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
-// Selector for a Subnet in vpc to populate subnetId.
-// +kubebuilder:validation:Optional
-SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+	// Type of mongo daemon which runs on this host (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
-// type of mongo daemon which runs on this host (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod.
-// +kubebuilder:validation:Optional
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
-
-// The availability zone where the MongoDB host will be created. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-ZoneID *string `json:"zoneId" tf:"zone_id,omitempty"`
+	// The availability zone where the MongoDB host will be created. For more information see the official documentation.
+	// +kubebuilder:validation:Optional
+	ZoneID *string `json:"zoneId" tf:"zone_id,omitempty"`
 }
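A host-entry sketch (not part of the generated file; values are placeholders, ptr is the hypothetical helper from the first sketch). Note that zoneId is the only required field here, since its json tag lacks omitempty, and subnetId may instead be resolved through SubnetIDRef or SubnetIDSelector:

// hostExample declares one mongod host; the subnet is resolved by a
// crossplane-runtime label selector rather than a hard-coded ID.
func hostExample() MongodbClusterHostParameters {
	return MongodbClusterHostParameters{
		ZoneID:         ptr("ru-central1-a"),
		Type:           ptr("mongod"), // also the default per the field doc
		AssignPublicIP: ptr(false),
		SubnetIDSelector: &v1.Selector{
			MatchLabels: map[string]string{"net": "mongo"}, // hypothetical label
		},
	}
}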

-
 type MongodbClusterInitParameters struct {
+	// Configuration of the MongoDB subcluster. The structure is documented below.
+	ClusterConfig []ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"`
-// Configuration of the MongoDB subcluster. The structure is documented below.
-ClusterConfig []ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"`
+	// The ID of the cluster.
+	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
-// The ID of the cluster.
-ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`
+	// A database of the MongoDB cluster. The structure is documented below.
+	Database []MongodbClusterDatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"`
-// A database of the MongoDB cluster. The structure is documented below.
-Database []MongodbClusterDatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
-// Inhibits deletion of the cluster. Can be either true or false.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Description of the MongoDB cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the MongoDB cluster.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	DiskSizeAutoscalingMongocfg []DiskSizeAutoscalingMongocfgInitParameters `json:"diskSizeAutoscalingMongocfg,omitempty" tf:"disk_size_autoscaling_mongocfg,omitempty"`
-DiskSizeAutoscalingMongocfg []DiskSizeAutoscalingMongocfgInitParameters `json:"diskSizeAutoscalingMongocfg,omitempty" tf:"disk_size_autoscaling_mongocfg,omitempty"`
+	DiskSizeAutoscalingMongod []DiskSizeAutoscalingMongodInitParameters `json:"diskSizeAutoscalingMongod,omitempty" tf:"disk_size_autoscaling_mongod,omitempty"`
-DiskSizeAutoscalingMongod []DiskSizeAutoscalingMongodInitParameters `json:"diskSizeAutoscalingMongod,omitempty" tf:"disk_size_autoscaling_mongod,omitempty"`
+	DiskSizeAutoscalingMongoinfra []DiskSizeAutoscalingMongoinfraInitParameters `json:"diskSizeAutoscalingMongoinfra,omitempty" tf:"disk_size_autoscaling_mongoinfra,omitempty"`
-DiskSizeAutoscalingMongoinfra []DiskSizeAutoscalingMongoinfraInitParameters `json:"diskSizeAutoscalingMongoinfra,omitempty" tf:"disk_size_autoscaling_mongoinfra,omitempty"`
+	DiskSizeAutoscalingMongos []DiskSizeAutoscalingMongosInitParameters `json:"diskSizeAutoscalingMongos,omitempty" tf:"disk_size_autoscaling_mongos,omitempty"`
-DiskSizeAutoscalingMongos []DiskSizeAutoscalingMongosInitParameters `json:"diskSizeAutoscalingMongos,omitempty" tf:"disk_size_autoscaling_mongos,omitempty"`
+	// Deployment environment of the MongoDB cluster. Can be either PRESTABLE or PRODUCTION.
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
-// Deployment environment of the MongoDB cluster. Can be either PRESTABLE or PRODUCTION.
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// A host of the MongoDB cluster. The structure is documented below.
+	Host []MongodbClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"`
-// A host of the MongoDB cluster. The structure is documented below.
-Host []MongodbClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"`
+	// A set of key/value label pairs to assign to the MongoDB cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// A set of key/value label pairs to assign to the MongoDB cluster.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Maintenance window settings of the MongoDB cluster. The structure is documented below.
+	MaintenanceWindow []MongodbClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
-// Maintenance window settings of the MongoDB cluster. The structure is documented below.
-MaintenanceWindow []MongodbClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+	// Name of the MongoDB cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Name of the MongoDB cluster. Provided by the client when the cluster is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// ID of the network, to which the MongoDB cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
-// ID of the network, to which the MongoDB cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
-// Reference to a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
-// Selector for a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+	// (DEPRECATED, use resources_* instead) Resources allocated to hosts of the MongoDB cluster. The structure is documented below.
+	Resources []MongodbClusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-// (DEPRECATED, use resources_* instead) Resources allocated to hosts of the MongoDB cluster. The structure is documented below.
-Resources []MongodbClusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Resources allocated to mongocfg hosts of the MongoDB cluster. The structure is documented below.
+	ResourcesMongocfg []ResourcesMongocfgInitParameters `json:"resourcesMongocfg,omitempty" tf:"resources_mongocfg,omitempty"`
-// Resources allocated to mongocfg hosts of the MongoDB cluster. The structure is documented below.
-ResourcesMongocfg []ResourcesMongocfgInitParameters `json:"resourcesMongocfg,omitempty" tf:"resources_mongocfg,omitempty"`
+	// Resources allocated to mongod hosts of the MongoDB cluster. The structure is documented below.
+	ResourcesMongod []ResourcesMongodInitParameters `json:"resourcesMongod,omitempty" tf:"resources_mongod,omitempty"`
-// Resources allocated to mongod hosts of the MongoDB cluster. The structure is documented below.
-ResourcesMongod []ResourcesMongodInitParameters `json:"resourcesMongod,omitempty" tf:"resources_mongod,omitempty"`
+	// Resources allocated to mongoinfra hosts of the MongoDB cluster. The structure is documented below.
+	ResourcesMongoinfra []ResourcesMongoinfraInitParameters `json:"resourcesMongoinfra,omitempty" tf:"resources_mongoinfra,omitempty"`
-// Resources allocated to mongoinfra hosts of the MongoDB cluster. The structure is documented below.
-ResourcesMongoinfra []ResourcesMongoinfraInitParameters `json:"resourcesMongoinfra,omitempty" tf:"resources_mongoinfra,omitempty"`
+	// Resources allocated to mongos hosts of the MongoDB cluster. The structure is documented below.
+	ResourcesMongos []ResourcesMongosInitParameters `json:"resourcesMongos,omitempty" tf:"resources_mongos,omitempty"`
-// Resources allocated to mongos hosts of the MongoDB cluster. The structure is documented below.
-ResourcesMongos []ResourcesMongosInitParameters `json:"resourcesMongos,omitempty" tf:"resources_mongos,omitempty"`
+	// The cluster will be created from the specified backup. The structure is documented below.
+	Restore []RestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"`
-// The cluster will be created from the specified backup. The structure is documented below.
-Restore []RestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"`
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-// A set of ids of security groups assigned to hosts of the cluster.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
-// References to SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-
-// A user of the MongoDB cluster. The structure is documented below.
-User []MongodbClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
+	// A user of the MongoDB cluster. The structure is documented below.
+	User []MongodbClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
 }

-
 type MongodbClusterMaintenanceWindowInitParameters struct {
+	// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
-// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
-
-// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
+	// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window need to be specified with the weekly window.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type MongodbClusterMaintenanceWindowObservation struct {
+	// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
-// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
-
-// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
+	// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window need to be specified with the weekly window.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }

-
 type MongodbClusterMaintenanceWindowParameters struct {
+	// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
+	// +kubebuilder:validation:Optional
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
-// Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
-// +kubebuilder:validation:Optional
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
-
-// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
-// +kubebuilder:validation:Optional
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
+	// Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
+	// +kubebuilder:validation:Optional
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window need to be specified with the weekly window.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
 }
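A maintenance-window sketch (not part of the generated file; ptr is the hypothetical helper from the first sketch). Type is the one required field here, since its json tag "type" lacks omitempty; Day and Hour must accompany the WEEKLY type per the field docs above:

// weeklyWindowExample pins maintenance to Saturday, 03:00 UTC.
func weeklyWindowExample() MongodbClusterMaintenanceWindowParameters {
	return MongodbClusterMaintenanceWindowParameters{
		Type: ptr("WEEKLY"),
		Day:  ptr("SAT"),
		Hour: ptr(3.0), // 1-24, UTC
	}
}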
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` -DiskSizeAutoscalingMongocfg []DiskSizeAutoscalingMongocfgObservation `json:"diskSizeAutoscalingMongocfg,omitempty" tf:"disk_size_autoscaling_mongocfg,omitempty"` + DiskSizeAutoscalingMongocfg []DiskSizeAutoscalingMongocfgObservation `json:"diskSizeAutoscalingMongocfg,omitempty" tf:"disk_size_autoscaling_mongocfg,omitempty"` -DiskSizeAutoscalingMongod []DiskSizeAutoscalingMongodObservation `json:"diskSizeAutoscalingMongod,omitempty" tf:"disk_size_autoscaling_mongod,omitempty"` + DiskSizeAutoscalingMongod []DiskSizeAutoscalingMongodObservation `json:"diskSizeAutoscalingMongod,omitempty" tf:"disk_size_autoscaling_mongod,omitempty"` -DiskSizeAutoscalingMongoinfra []DiskSizeAutoscalingMongoinfraObservation `json:"diskSizeAutoscalingMongoinfra,omitempty" tf:"disk_size_autoscaling_mongoinfra,omitempty"` + DiskSizeAutoscalingMongoinfra []DiskSizeAutoscalingMongoinfraObservation `json:"diskSizeAutoscalingMongoinfra,omitempty" tf:"disk_size_autoscaling_mongoinfra,omitempty"` -DiskSizeAutoscalingMongos []DiskSizeAutoscalingMongosObservation `json:"diskSizeAutoscalingMongos,omitempty" tf:"disk_size_autoscaling_mongos,omitempty"` + DiskSizeAutoscalingMongos []DiskSizeAutoscalingMongosObservation `json:"diskSizeAutoscalingMongos,omitempty" tf:"disk_size_autoscaling_mongos,omitempty"` -// Deployment environment of the MongoDB cluster. Can be either PRESTABLE or PRODUCTION. -Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + // Deployment environment of the MongoDB cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` -// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. -Health *string `json:"health,omitempty" tf:"health,omitempty"` + // Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. + Health *string `json:"health,omitempty" tf:"health,omitempty"` -// A host of the MongoDB cluster. The structure is documented below. -Host []MongodbClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"` + // A host of the MongoDB cluster. The structure is documented below. + Host []MongodbClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the MongoDB cluster. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the MongoDB cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Maintenance window settings of the MongoDB cluster. The structure is documented below. 
-MaintenanceWindow []MongodbClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + // Maintenance window settings of the MongoDB cluster. The structure is documented below. + MaintenanceWindow []MongodbClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` -// Name of the MongoDB cluster. Provided by the client when the cluster is created. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the MongoDB cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// ID of the network, to which the MongoDB cluster belongs. -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // ID of the network, to which the MongoDB cluster belongs. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` -// (DEPRECATED, use resources_* instead) Resources allocated to hosts of the MongoDB cluster. The structure is documented below. -Resources []MongodbClusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + // (DEPRECATED, use resources_* instead) Resources allocated to hosts of the MongoDB cluster. The structure is documented below. + Resources []MongodbClusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` -// Resources allocated to mongocfg hosts of the MongoDB cluster. The structure is documented below. -ResourcesMongocfg []ResourcesMongocfgObservation `json:"resourcesMongocfg,omitempty" tf:"resources_mongocfg,omitempty"` + // Resources allocated to mongocfg hosts of the MongoDB cluster. The structure is documented below. + ResourcesMongocfg []ResourcesMongocfgObservation `json:"resourcesMongocfg,omitempty" tf:"resources_mongocfg,omitempty"` -// Resources allocated to mongod hosts of the MongoDB cluster. The structure is documented below. -ResourcesMongod []ResourcesMongodObservation `json:"resourcesMongod,omitempty" tf:"resources_mongod,omitempty"` + // Resources allocated to mongod hosts of the MongoDB cluster. The structure is documented below. + ResourcesMongod []ResourcesMongodObservation `json:"resourcesMongod,omitempty" tf:"resources_mongod,omitempty"` -// Resources allocated to mongoinfra hosts of the MongoDB cluster. The structure is documented below. -ResourcesMongoinfra []ResourcesMongoinfraObservation `json:"resourcesMongoinfra,omitempty" tf:"resources_mongoinfra,omitempty"` + // Resources allocated to mongoinfra hosts of the MongoDB cluster. The structure is documented below. + ResourcesMongoinfra []ResourcesMongoinfraObservation `json:"resourcesMongoinfra,omitempty" tf:"resources_mongoinfra,omitempty"` -// Resources allocated to mongos hosts of the MongoDB cluster. The structure is documented below. -ResourcesMongos []ResourcesMongosObservation `json:"resourcesMongos,omitempty" tf:"resources_mongos,omitempty"` + // Resources allocated to mongos hosts of the MongoDB cluster. The structure is documented below. + ResourcesMongos []ResourcesMongosObservation `json:"resourcesMongos,omitempty" tf:"resources_mongos,omitempty"` -// The cluster will be created from the specified backup. The structure is documented below. -Restore []RestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"` + // The cluster will be created from the specified backup. The structure is documented below. 
+ Restore []RestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"` -// A set of ids of security groups assigned to hosts of the cluster. -// +listType=set -SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // A set of ids of security groups assigned to hosts of the cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` -// MongoDB Cluster mode enabled/disabled. -Sharded *bool `json:"sharded,omitempty" tf:"sharded,omitempty"` + // MongoDB Cluster mode enabled/disabled. + Sharded *bool `json:"sharded,omitempty" tf:"sharded,omitempty"` -// Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. -Status *string `json:"status,omitempty" tf:"status,omitempty"` + // Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. + Status *string `json:"status,omitempty" tf:"status,omitempty"` -// A user of the MongoDB cluster. The structure is documented below. -User []MongodbClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"` + // A user of the MongoDB cluster. The structure is documented below. + User []MongodbClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"` } - type MongodbClusterParameters struct { + // Configuration of the MongoDB subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + ClusterConfig []ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` -// Configuration of the MongoDB subcluster. The structure is documented below. -// +kubebuilder:validation:Optional -ClusterConfig []ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` - -// The ID of the cluster. -// +kubebuilder:validation:Optional -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + // The ID of the cluster. + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// A database of the MongoDB cluster. The structure is documented below. -// +kubebuilder:validation:Optional -Database []MongodbClusterDatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` + // A database of the MongoDB cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Database []MongodbClusterDatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` -// Inhibits deletion of the cluster. Can be either true or false. -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// Description of the MongoDB cluster. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the MongoDB cluster. 
+ // +kubebuilder:validation:Optional
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// +kubebuilder:validation:Optional
-DiskSizeAutoscalingMongocfg []DiskSizeAutoscalingMongocfgParameters `json:"diskSizeAutoscalingMongocfg,omitempty" tf:"disk_size_autoscaling_mongocfg,omitempty"`
+ // +kubebuilder:validation:Optional
+ DiskSizeAutoscalingMongocfg []DiskSizeAutoscalingMongocfgParameters `json:"diskSizeAutoscalingMongocfg,omitempty" tf:"disk_size_autoscaling_mongocfg,omitempty"`
-// +kubebuilder:validation:Optional
-DiskSizeAutoscalingMongod []DiskSizeAutoscalingMongodParameters `json:"diskSizeAutoscalingMongod,omitempty" tf:"disk_size_autoscaling_mongod,omitempty"`
+ // +kubebuilder:validation:Optional
+ DiskSizeAutoscalingMongod []DiskSizeAutoscalingMongodParameters `json:"diskSizeAutoscalingMongod,omitempty" tf:"disk_size_autoscaling_mongod,omitempty"`
-// +kubebuilder:validation:Optional
-DiskSizeAutoscalingMongoinfra []DiskSizeAutoscalingMongoinfraParameters `json:"diskSizeAutoscalingMongoinfra,omitempty" tf:"disk_size_autoscaling_mongoinfra,omitempty"`
+ // +kubebuilder:validation:Optional
+ DiskSizeAutoscalingMongoinfra []DiskSizeAutoscalingMongoinfraParameters `json:"diskSizeAutoscalingMongoinfra,omitempty" tf:"disk_size_autoscaling_mongoinfra,omitempty"`
-// +kubebuilder:validation:Optional
-DiskSizeAutoscalingMongos []DiskSizeAutoscalingMongosParameters `json:"diskSizeAutoscalingMongos,omitempty" tf:"disk_size_autoscaling_mongos,omitempty"`
+ // +kubebuilder:validation:Optional
+ DiskSizeAutoscalingMongos []DiskSizeAutoscalingMongosParameters `json:"diskSizeAutoscalingMongos,omitempty" tf:"disk_size_autoscaling_mongos,omitempty"`
-// Deployment environment of the MongoDB cluster. Can be either PRESTABLE or PRODUCTION.
-// +kubebuilder:validation:Optional
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+ // Deployment environment of the MongoDB cluster. Can be either PRESTABLE or PRODUCTION.
+ // +kubebuilder:validation:Optional
+ Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+ // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+ // +kubebuilder:validation:Optional
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+ // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+ // Selector for a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional
+ FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// A host of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Host []MongodbClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"`
+ // A host of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Host []MongodbClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"`
-// A set of key/value label pairs to assign to the MongoDB cluster.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+ // A set of key/value label pairs to assign to the MongoDB cluster.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// Maintenance window settings of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-MaintenanceWindow []MongodbClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+ // Maintenance window settings of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ MaintenanceWindow []MongodbClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
-// Name of the MongoDB cluster. Provided by the client when the cluster is created.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // Name of the MongoDB cluster. Provided by the client when the cluster is created.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// ID of the network, to which the MongoDB cluster belongs.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
-// +kubebuilder:validation:Optional
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+ // ID of the network to which the MongoDB cluster belongs.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+ // +kubebuilder:validation:Optional
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
-// Reference to a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+ // Reference to a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
-// Selector for a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+ // Selector for a Network in vpc to populate networkId.
+ // +kubebuilder:validation:Optional
+ NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
-// (DEPRECATED, use resources_* instead) Resources allocated to hosts of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Resources []MongodbClusterResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+ // (DEPRECATED, use resources_* instead) Resources allocated to hosts of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Resources []MongodbClusterResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-// Resources allocated to mongocfg hosts of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-ResourcesMongocfg []ResourcesMongocfgParameters `json:"resourcesMongocfg,omitempty" tf:"resources_mongocfg,omitempty"`
+ // Resources allocated to mongocfg hosts of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ ResourcesMongocfg []ResourcesMongocfgParameters `json:"resourcesMongocfg,omitempty" tf:"resources_mongocfg,omitempty"`
-// Resources allocated to mongod hosts of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-ResourcesMongod []ResourcesMongodParameters `json:"resourcesMongod,omitempty" tf:"resources_mongod,omitempty"`
+ // Resources allocated to mongod hosts of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ ResourcesMongod []ResourcesMongodParameters `json:"resourcesMongod,omitempty" tf:"resources_mongod,omitempty"`
-// Resources allocated to mongoinfra hosts of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-ResourcesMongoinfra []ResourcesMongoinfraParameters `json:"resourcesMongoinfra,omitempty" tf:"resources_mongoinfra,omitempty"`
+ // Resources allocated to mongoinfra hosts of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ ResourcesMongoinfra []ResourcesMongoinfraParameters `json:"resourcesMongoinfra,omitempty" tf:"resources_mongoinfra,omitempty"`
-// Resources allocated to mongos hosts of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-ResourcesMongos []ResourcesMongosParameters `json:"resourcesMongos,omitempty" tf:"resources_mongos,omitempty"`
+ // Resources allocated to mongos hosts of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ ResourcesMongos []ResourcesMongosParameters `json:"resourcesMongos,omitempty" tf:"resources_mongos,omitempty"`
-// The cluster will be created from the specified backup. The structure is documented below.
-// +kubebuilder:validation:Optional
-Restore []RestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"`
+ // The cluster will be created from the specified backup. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Restore []RestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"`
-// A set of ids of security groups assigned to hosts of the cluster.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +kubebuilder:validation:Optional
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+ // A set of IDs of security groups assigned to hosts of the cluster.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-// References to SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+ // References to SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
-// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+ // Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+ // +kubebuilder:validation:Optional
+ SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-// A user of the MongoDB cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-User []MongodbClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
+ // A user of the MongoDB cluster. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ User []MongodbClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
}
-
type MongodbClusterResourcesInitParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type MongodbClusterResourcesObservation struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type MongodbClusterResourcesParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
}
-
type MongodbClusterUserInitParameters struct {
+ // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+ // The password of the user.
+ PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// The password of the user.
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []MongodbClusterUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+ // Set of permissions granted to the user. The structure is documented below.
+ Permission []MongodbClusterUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
}
-
type MongodbClusterUserObservation struct {
+ // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []MongodbClusterUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
+ // Set of permissions granted to the user. The structure is documented below.
+ Permission []MongodbClusterUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
}
-
type MongodbClusterUserParameters struct {
+ // The name of the user.
+ // +kubebuilder:validation:Optional
+ Name *string `json:"name" tf:"name,omitempty"`
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+ // The password of the user.
+ // +kubebuilder:validation:Optional
+ PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// The password of the user.
-// +kubebuilder:validation:Optional
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-
-// Set of permissions granted to the user. The structure is documented below.
-// +kubebuilder:validation:Optional
-Permission []MongodbClusterUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+ // Set of permissions granted to the user. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Permission []MongodbClusterUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
}
-
type MongodbClusterUserPermissionInitParameters struct {
+ // The name of the database that the permission grants access to.
+ DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
-// The name of the database that the permission grants access to.
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
-
-// The roles of the user in this database. For more information see the official documentation.
-Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
+ // The roles of the user in this database. For more information, see the official documentation.
+ Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
}
-
type MongodbClusterUserPermissionObservation struct {
+ // The name of the database that the permission grants access to.
+ DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
-// The name of the database that the permission grants access to.
-DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
-
-// The roles of the user in this database. For more information see the official documentation.
-Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
+ // The roles of the user in this database. For more information, see the official documentation.
+ Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
}
-
type MongodbClusterUserPermissionParameters struct {
+ // The name of the database that the permission grants access to.
+ // +kubebuilder:validation:Optional
+ DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"`
-// The name of the database that the permission grants access to.
-// +kubebuilder:validation:Optional
-DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"`
-
-// The roles of the user in this database. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
+ // The roles of the user in this database. For more information, see the official documentation.
+ // +kubebuilder:validation:Optional
+ Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
}
-
type MongosInitParameters struct {
-
-// A set of network settings (see the net option). The structure is documented below.
-Net []MongosNetInitParameters `json:"net,omitempty" tf:"net,omitempty"`
+ // A set of network settings (see the net option). The structure is documented below.
+ Net []MongosNetInitParameters `json:"net,omitempty" tf:"net,omitempty"`
}
-
type MongosNetInitParameters struct {
+ // Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts an array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
+ Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
-Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+ // The maximum number of simultaneous connections that the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+ MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
}
-
type MongosNetObservation struct {
+ // Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts an array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
+ Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
-Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+ // The maximum number of simultaneous connections that the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+ MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
}
-
type MongosNetParameters struct {
+ // Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts an array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
+ // +kubebuilder:validation:Optional
+ Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-// Specifies the default compressor(s) to use for communication between this mongod or mongos. Accepts array of compressors. Order matters. Available compressors: snappy, zlib, zstd, disabled. To disable network compression, make "disabled" the only value. For more information, see the net.Compression.Compressors description in the official documentation.
-// +kubebuilder:validation:Optional
-Compressors []*string `json:"compressors,omitempty" tf:"compressors,omitempty"`
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-// +kubebuilder:validation:Optional
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+ // The maximum number of simultaneous connections that the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+ // +kubebuilder:validation:Optional
+ MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
}
-
type MongosObservation struct {
-
-// A set of network settings (see the net option). The structure is documented below.
-Net []MongosNetObservation `json:"net,omitempty" tf:"net,omitempty"`
+ // A set of network settings (see the net option). The structure is documented below.
+ Net []MongosNetObservation `json:"net,omitempty" tf:"net,omitempty"`
}
-
type MongosParameters struct {
-
-// A set of network settings (see the net option). The structure is documented below.
-// +kubebuilder:validation:Optional
-Net []MongosNetParameters `json:"net,omitempty" tf:"net,omitempty"`
+ // A set of network settings (see the net option). The structure is documented below.
+ // +kubebuilder:validation:Optional
+ Net []MongosNetParameters `json:"net,omitempty" tf:"net,omitempty"`
}
-
type NetInitParameters struct {
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+ // The maximum number of simultaneous connections that the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+ MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
}
-
type NetObservation struct {
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+ // The maximum number of simultaneous connections that the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+ MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
}
-
type NetParameters struct {
-
-// The maximum number of simultaneous connections that host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
-// +kubebuilder:validation:Optional
-MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
+ // The maximum number of simultaneous connections that the host will accept. For more information, see the net.maxIncomingConnections description in the official documentation.
+ // +kubebuilder:validation:Optional
+ MaxIncomingConnections *float64 `json:"maxIncomingConnections,omitempty" tf:"max_incoming_connections,omitempty"`
}
-
type OperationProfilingInitParameters struct {
+ // Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-
-// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
-SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
+ // The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
+ SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
}
-
type OperationProfilingObservation struct {
+ // Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-
-// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
-SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
+ // The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
+ SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
}
-
type OperationProfilingParameters struct {
+ // Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
+ // +kubebuilder:validation:Optional
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-// Specifies which operations should be profiled. The following profiler levels are available: off, slow_op, all. For more information, see the operationProfiling.mode description in the official documentation.
-// +kubebuilder:validation:Optional
-Mode *string `json:"mode,omitempty" tf:"mode,omitempty"`
-
-// The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
-// +kubebuilder:validation:Optional
-SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
+ // The slow operation time threshold, in milliseconds. Operations that run for longer than this threshold are considered slow. For more information, see the operationProfiling.slowOpThresholdMs description in the official documentation.
+ // +kubebuilder:validation:Optional
+ SlowOpThreshold *float64 `json:"slowOpThreshold,omitempty" tf:"slow_op_threshold,omitempty"`
}
-
type PerformanceDiagnosticsInitParameters struct {
-
-// Enable or disable performance diagnostics.
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // Enable or disable performance diagnostics.
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
}
-
type PerformanceDiagnosticsObservation struct {
-
-// Enable or disable performance diagnostics.
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // Enable or disable performance diagnostics.
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
}
-
type PerformanceDiagnosticsParameters struct {
-
-// Enable or disable performance diagnostics.
-// +kubebuilder:validation:Optional
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+ // Enable or disable performance diagnostics.
+ // +kubebuilder:validation:Optional
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
}
-
type ResourcesMongocfgInitParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongocfgObservation struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongocfgParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongodInitParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongodObservation struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongodParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongoinfraInitParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongoinfraObservation struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongoinfraParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongosInitParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongosObservation struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+ ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
}
-
type ResourcesMongosParameters struct {
+ // Volume of the storage available to a MongoDB host, in gigabytes.
+ // +kubebuilder:validation:Optional
+ DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MongoDB host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-
-// Type of the storage of MongoDB hosts. For more information see the official documentation.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
+ // Type of the storage of MongoDB hosts. For more information, see the official documentation.
+ // +kubebuilder:validation:Optional
+ DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+ // +kubebuilder:validation:Optional
+ ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
}
-
type RestoreInitParameters struct {
+ // Backup ID. The cluster will be created from the specified backup. For how to get a list of MongoDB backups, see the official documentation.
+ BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-// Backup ID. The cluster will be created from the specified backup. How to get a list of PostgreSQL backups
-BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-
-// Timestamp of the moment to which the MongoDB cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
-Time *string `json:"time,omitempty" tf:"time,omitempty"`
+ // Timestamp of the moment to which the MongoDB cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, the current time is used.
+ Time *string `json:"time,omitempty" tf:"time,omitempty"`
}
-
type RestoreObservation struct {
+ // Backup ID. The cluster will be created from the specified backup. For how to get a list of MongoDB backups, see the official documentation.
+ BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-// Backup ID. The cluster will be created from the specified backup. How to get a list of PostgreSQL backups
-BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-
-// Timestamp of the moment to which the MongoDB cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
-Time *string `json:"time,omitempty" tf:"time,omitempty"`
+ // Timestamp of the moment to which the MongoDB cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, the current time is used.
+ Time *string `json:"time,omitempty" tf:"time,omitempty"`
}
-
type RestoreParameters struct {
+ // Backup ID. The cluster will be created from the specified backup. For how to get a list of MongoDB backups, see the official documentation.
+ // +kubebuilder:validation:Optional
+ BackupID *string `json:"backupId" tf:"backup_id,omitempty"`
-// Backup ID. The cluster will be created from the specified backup. How to get a list of PostgreSQL backups
-// +kubebuilder:validation:Optional
-BackupID *string `json:"backupId" tf:"backup_id,omitempty"`
-
-// Timestamp of the moment to which the MongoDB cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
-// +kubebuilder:validation:Optional
-Time *string `json:"time,omitempty" tf:"time,omitempty"`
+ // Timestamp of the moment to which the MongoDB cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, the current time is used.
+ // +kubebuilder:validation:Optional
+ Time *string `json:"time,omitempty" tf:"time,omitempty"`
}
-
type SecurityInitParameters struct {
+ // Enables encryption for the WiredTiger storage engine. Can be either true or false. For more information, see the security.enableEncryption description in the official documentation. Available only in enterprise edition.
+ EnableEncryption *bool `json:"enableEncryption,omitempty" tf:"enable_encryption,omitempty"`
-// Enables the encryption for the WiredTiger storage engine. Can be either true or false. For more information see security.enableEncryption description in the official documentation. Available only in enterprise edition.
-EnableEncryption *bool `json:"enableEncryption,omitempty" tf:"enable_encryption,omitempty"`
-
-// Configuration of the third party key management appliance via the Key Management Interoperability Protocol (KMIP) (see Encryption tutorial ). Requires enable_encryption to be true. The structure is documented below. Available only in enterprise edition.
-Kmip []KmipInitParameters `json:"kmip,omitempty" tf:"kmip,omitempty"`
+ // Configuration of the third-party key management appliance via the Key Management Interoperability Protocol (KMIP) (see the Encryption tutorial). Requires enable_encryption to be true. The structure is documented below. Available only in enterprise edition.
+ Kmip []KmipInitParameters `json:"kmip,omitempty" tf:"kmip,omitempty"`
}
-
type SecurityObservation struct {
+ // Enables encryption for the WiredTiger storage engine. Can be either true or false. For more information, see the security.enableEncryption description in the official documentation. Available only in enterprise edition.
+ EnableEncryption *bool `json:"enableEncryption,omitempty" tf:"enable_encryption,omitempty"`
-// Enables the encryption for the WiredTiger storage engine. Can be either true or false. For more information see security.enableEncryption description in the official documentation. Available only in enterprise edition.
-EnableEncryption *bool `json:"enableEncryption,omitempty" tf:"enable_encryption,omitempty"`
-
-// Configuration of the third party key management appliance via the Key Management Interoperability Protocol (KMIP) (see Encryption tutorial ). Requires enable_encryption to be true. The structure is documented below. Available only in enterprise edition.
-Kmip []KmipObservation `json:"kmip,omitempty" tf:"kmip,omitempty"`
+ // Configuration of the third-party key management appliance via the Key Management Interoperability Protocol (KMIP) (see the Encryption tutorial). Requires enable_encryption to be true. The structure is documented below. Available only in enterprise edition.
+ Kmip []KmipObservation `json:"kmip,omitempty" tf:"kmip,omitempty"`
}
-
type SecurityParameters struct {
+ // Enables encryption for the WiredTiger storage engine. Can be either true or false. For more information, see the security.enableEncryption description in the official documentation. Available only in enterprise edition.
+ // +kubebuilder:validation:Optional
+ EnableEncryption *bool `json:"enableEncryption,omitempty" tf:"enable_encryption,omitempty"`
-// Enables the encryption for the WiredTiger storage engine. Can be either true or false. For more information see security.enableEncryption description in the official documentation. Available only in enterprise edition.
-// +kubebuilder:validation:Optional
-EnableEncryption *bool `json:"enableEncryption,omitempty" tf:"enable_encryption,omitempty"`
-
-// Configuration of the third party key management appliance via the Key Management Interoperability Protocol (KMIP) (see Encryption tutorial ). Requires enable_encryption to be true. The structure is documented below. Available only in enterprise edition.
-// +kubebuilder:validation:Optional
-Kmip []KmipParameters `json:"kmip,omitempty" tf:"kmip,omitempty"`
+ // Configuration of the third-party key management appliance via the Key Management Interoperability Protocol (KMIP) (see the Encryption tutorial). Requires enable_encryption to be true. The structure is documented below. Available only in enterprise edition.
+ // +kubebuilder:validation:Optional
+ Kmip []KmipParameters `json:"kmip,omitempty" tf:"kmip,omitempty"`
}
-
type SetParameterInitParameters struct {
+ // Enables the auditing of authorization successes. Can be either true or false. For more information, see the auditAuthorizationSuccess description in the official documentation. Available only in enterprise edition.
+ AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty" tf:"audit_authorization_success,omitempty"`
-// Enables the auditing of authorization successes. Can be either true or false. For more information, see the auditAuthorizationSuccess description in the official documentation. Available only in enterprise edition.
-AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty" tf:"audit_authorization_success,omitempty"`
-
-// Enables the flow control. Can be either true or false. For more information, see the enableFlowControl description in the official documentation.
-EnableFlowControl *bool `json:"enableFlowControl,omitempty" tf:"enable_flow_control,omitempty"`
+ // Enables flow control. Can be either true or false. For more information, see the enableFlowControl description in the official documentation.
+ EnableFlowControl *bool `json:"enableFlowControl,omitempty" tf:"enable_flow_control,omitempty"`
-// The minimum time window in seconds for which the storage engine keeps the snapshot history. For more information, see the minSnapshotHistoryWindowInSeconds description in the official documentation.
-MinSnapshotHistoryWindowInSeconds *float64 `json:"minSnapshotHistoryWindowInSeconds,omitempty" tf:"min_snapshot_history_window_in_seconds,omitempty"`
+ // The minimum time window in seconds for which the storage engine keeps the snapshot history. For more information, see the minSnapshotHistoryWindowInSeconds description in the official documentation.
+ MinSnapshotHistoryWindowInSeconds *float64 `json:"minSnapshotHistoryWindowInSeconds,omitempty" tf:"min_snapshot_history_window_in_seconds,omitempty"`
}
-
type SetParameterObservation struct {
+ // Enables the auditing of authorization successes. Can be either true or false. For more information, see the auditAuthorizationSuccess description in the official documentation. Available only in enterprise edition.
+ AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty" tf:"audit_authorization_success,omitempty"`
-// Enables the auditing of authorization successes. Can be either true or false. For more information, see the auditAuthorizationSuccess description in the official documentation. Available only in enterprise edition.
-AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty" tf:"audit_authorization_success,omitempty"`
-
-// Enables the flow control. Can be either true or false. For more information, see the enableFlowControl description in the official documentation.
-EnableFlowControl *bool `json:"enableFlowControl,omitempty" tf:"enable_flow_control,omitempty"`
+ // Enables flow control. Can be either true or false. For more information, see the enableFlowControl description in the official documentation.
+ EnableFlowControl *bool `json:"enableFlowControl,omitempty" tf:"enable_flow_control,omitempty"`
-// The minimum time window in seconds for which the storage engine keeps the snapshot history. For more information, see the minSnapshotHistoryWindowInSeconds description in the official documentation.
-MinSnapshotHistoryWindowInSeconds *float64 `json:"minSnapshotHistoryWindowInSeconds,omitempty" tf:"min_snapshot_history_window_in_seconds,omitempty"`
+ // The minimum time window in seconds for which the storage engine keeps the snapshot history. For more information, see the minSnapshotHistoryWindowInSeconds description in the official documentation.
+ MinSnapshotHistoryWindowInSeconds *float64 `json:"minSnapshotHistoryWindowInSeconds,omitempty" tf:"min_snapshot_history_window_in_seconds,omitempty"`
}
-
type SetParameterParameters struct {
+ // Enables the auditing of authorization successes. Can be either true or false. For more information, see the auditAuthorizationSuccess description in the official documentation. Available only in enterprise edition.
+ // +kubebuilder:validation:Optional
+ AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty" tf:"audit_authorization_success,omitempty"`
-// Enables the auditing of authorization successes. Can be either true or false. For more information, see the auditAuthorizationSuccess description in the official documentation. Available only in enterprise edition.
-// +kubebuilder:validation:Optional
-AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty" tf:"audit_authorization_success,omitempty"`
-
-// Enables the flow control. Can be either true or false. For more information, see the enableFlowControl description in the official documentation.
-// +kubebuilder:validation:Optional
-EnableFlowControl *bool `json:"enableFlowControl,omitempty" tf:"enable_flow_control,omitempty"`
+ // Enables flow control. Can be either true or false. For more information, see the enableFlowControl description in the official documentation.
+ // +kubebuilder:validation:Optional
+ EnableFlowControl *bool `json:"enableFlowControl,omitempty" tf:"enable_flow_control,omitempty"`
-// The minimum time window in seconds for which the storage engine keeps the snapshot history. For more information, see the minSnapshotHistoryWindowInSeconds description in the official documentation.
-// +kubebuilder:validation:Optional
-MinSnapshotHistoryWindowInSeconds *float64 `json:"minSnapshotHistoryWindowInSeconds,omitempty" tf:"min_snapshot_history_window_in_seconds,omitempty"`
+ // The minimum time window in seconds for which the storage engine keeps the snapshot history. For more information, see the minSnapshotHistoryWindowInSeconds description in the official documentation.
+ // +kubebuilder:validation:Optional
+ MinSnapshotHistoryWindowInSeconds *float64 `json:"minSnapshotHistoryWindowInSeconds,omitempty" tf:"min_snapshot_history_window_in_seconds,omitempty"`
}
-
type StorageInitParameters struct {
-
-// The WiredTiger engine settings. (see the storage.wiredTiger option). These settings available only on mongod hosts. The structure is documented below.
-WiredTiger []WiredTigerInitParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
+ // The WiredTiger engine settings (see the storage.wiredTiger option). These settings are available only on mongod hosts. The structure is documented below.
+ WiredTiger []WiredTigerInitParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
}
-
type StorageObservation struct {
-
-// The WiredTiger engine settings. (see the storage.wiredTiger option). These settings available only on mongod hosts. The structure is documented below.
-WiredTiger []WiredTigerObservation `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
+ // The WiredTiger engine settings (see the storage.wiredTiger option). These settings are available only on mongod hosts. The structure is documented below.
+ WiredTiger []WiredTigerObservation `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
}
-
type StorageParameters struct {
-
-// The WiredTiger engine settings. (see the storage.wiredTiger option). These settings available only on mongod hosts. The structure is documented below.
-// +kubebuilder:validation:Optional
-WiredTiger []WiredTigerParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
+ // The WiredTiger engine settings (see the storage.wiredTiger option). These settings are available only on mongod hosts. The structure is documented below.
+ // +kubebuilder:validation:Optional
+ WiredTiger []WiredTigerParameters `json:"wiredTiger,omitempty" tf:"wired_tiger,omitempty"`
}
-
type StorageWiredTigerInitParameters struct {
+ // Specifies the default compression for collection data. You can override this on a per-collection basis when creating collections. Available compressors are: none, snappy, zlib, zstd. This setting is available only on mongod hosts. For more information, see the storage.wiredTiger.collectionConfig.blockCompressor description in the official documentation.
+ BlockCompressor *string `json:"blockCompressor,omitempty" tf:"block_compressor,omitempty"`
-// Specifies the default compression for collection data. You can override this on a per-collection basis when creating collections. Available compressors are: none, snappy, zlib, zstd. This setting available only on mongod hosts. For more information, see the storage.wiredTiger.collectionConfig.blockCompressor description in the official documentation.
-BlockCompressor *string `json:"blockCompressor,omitempty" tf:"block_compressor,omitempty"`
+ // Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation.
+ CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"`
-// Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation.
-CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"`
-
-// Enables or disables prefix compression for index data. Сan be either true or false. For more information, see the storage.wiredTiger.indexConfig.prefixCompression description in the official documentation.
-PrefixCompression *bool `json:"prefixCompression,omitempty" tf:"prefix_compression,omitempty"`
+ // Enables or disables prefix compression for index data. Can be either true or false. For more information, see the storage.wiredTiger.indexConfig.prefixCompression description in the official documentation.
+ PrefixCompression *bool `json:"prefixCompression,omitempty" tf:"prefix_compression,omitempty"` } - type StorageWiredTigerObservation struct { + // Specifies the default compression for collection data. You can override this on a per-collection basis when creating collections. Available compressors are: none, snappy, zlib, zstd. This setting is available only on mongod hosts. For more information, see the storage.wiredTiger.collectionConfig.blockCompressor description in the official documentation. + BlockCompressor *string `json:"blockCompressor,omitempty" tf:"block_compressor,omitempty"` -// Specifies the default compression for collection data. You can override this on a per-collection basis when creating collections. Available compressors are: none, snappy, zlib, zstd. This setting available only on mongod hosts. For more information, see the storage.wiredTiger.collectionConfig.blockCompressor description in the official documentation. -BlockCompressor *string `json:"blockCompressor,omitempty" tf:"block_compressor,omitempty"` + // Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. + CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` -// Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. -CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` - -// Enables or disables prefix compression for index data. Сan be either true or false. For more information, see the storage.wiredTiger.indexConfig.prefixCompression description in the official documentation. -PrefixCompression *bool `json:"prefixCompression,omitempty" tf:"prefix_compression,omitempty"` + // Enables or disables prefix compression for index data. Can be either true or false. For more information, see the storage.wiredTiger.indexConfig.prefixCompression description in the official documentation. + PrefixCompression *bool `json:"prefixCompression,omitempty" tf:"prefix_compression,omitempty"` } - type StorageWiredTigerParameters struct { + // Specifies the default compression for collection data. You can override this on a per-collection basis when creating collections. Available compressors are: none, snappy, zlib, zstd. This setting is available only on mongod hosts. For more information, see the storage.wiredTiger.collectionConfig.blockCompressor description in the official documentation. + // +kubebuilder:validation:Optional + BlockCompressor *string `json:"blockCompressor,omitempty" tf:"block_compressor,omitempty"` -// Specifies the default compression for collection data. You can override this on a per-collection basis when creating collections. Available compressors are: none, snappy, zlib, zstd. This setting available only on mongod hosts. For more information, see the storage.wiredTiger.collectionConfig.blockCompressor description in the official documentation. -// +kubebuilder:validation:Optional -BlockCompressor *string `json:"blockCompressor,omitempty" tf:"block_compressor,omitempty"` - -// Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation.
-// +kubebuilder:validation:Optional -CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` + // Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. + // +kubebuilder:validation:Optional + CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` -// Enables or disables prefix compression for index data. Сan be either true or false. For more information, see the storage.wiredTiger.indexConfig.prefixCompression description in the official documentation. -// +kubebuilder:validation:Optional -PrefixCompression *bool `json:"prefixCompression,omitempty" tf:"prefix_compression,omitempty"` + // Enables or disables prefix compression for index data. Can be either true or false. For more information, see the storage.wiredTiger.indexConfig.prefixCompression description in the official documentation. + // +kubebuilder:validation:Optional + PrefixCompression *bool `json:"prefixCompression,omitempty" tf:"prefix_compression,omitempty"` } - type WiredTigerInitParameters struct { - -// Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. -CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` + // Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. + CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` } - type WiredTigerObservation struct { - -// Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. -CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` + // Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. + CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` } - type WiredTigerParameters struct { - -// Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. -// +kubebuilder:validation:Optional -CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` + // Defines the maximum size of the internal cache that WiredTiger will use for all data. For more information, see the storage.wiredTiger.engineConfig.cacheSizeGB description in the official documentation. + // +kubebuilder:validation:Optional + CacheSizeGb *float64 `json:"cacheSizeGb,omitempty" tf:"cache_size_gb,omitempty"` } // MongodbClusterSpec defines the desired state of MongodbCluster type MongodbClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider MongodbClusterParameters `json:"forProvider"` + ForProvider MongodbClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled.
// InitProvider holds the same fields as ForProvider, with the exception @@ -1939,20 +1706,19 @@ type MongodbClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider MongodbClusterInitParameters `json:"initProvider,omitempty"` + InitProvider MongodbClusterInitParameters `json:"initProvider,omitempty"` } // MongodbClusterStatus defines the observed state of MongodbCluster. type MongodbClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider MongodbClusterObservation `json:"atProvider,omitempty"` + AtProvider MongodbClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // MongodbCluster is the Schema for the MongodbClusters API. Manages a MongoDB cluster within Yandex.Cloud. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -1962,12 +1728,12 @@ type MongodbClusterStatus struct { type MongodbCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterConfig) || (has(self.initProvider) && has(self.initProvider.clusterConfig))",message="spec.forProvider.clusterConfig is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec MongodbClusterSpec `json:"spec"` - Status MongodbClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterConfig) || (has(self.initProvider) && has(self.initProvider.clusterConfig))",message="spec.forProvider.clusterConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) ||
(has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MongodbClusterSpec `json:"spec"` + Status MongodbClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mongodbdatabase_terraformed.go b/apis/mdb/v1alpha1/zz_mongodbdatabase_terraformed.go new file mode 100755 index 0000000..53e81da --- /dev/null +++ b/apis/mdb/v1alpha1/zz_mongodbdatabase_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MongodbDatabase +func (mg *MongodbDatabase) GetTerraformResourceType() string { + return "yandex_mdb_mongodb_database" +} + +// GetConnectionDetailsMapping for this MongodbDatabase +func (tr *MongodbDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MongodbDatabase +func (tr *MongodbDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MongodbDatabase +func (tr *MongodbDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MongodbDatabase +func (tr *MongodbDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MongodbDatabase +func (tr *MongodbDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MongodbDatabase +func (tr *MongodbDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MongodbDatabase +func (tr *MongodbDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this MongodbDatabase +func (tr *MongodbDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge
the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, as we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MongodbDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MongodbDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &MongodbDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MongodbDatabase) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go b/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go index b31943e..82cf2d4 100755 --- a/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go +++ b/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,67 +7,57 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type MongodbDatabaseInitParameters struct { + // +crossplane:generate:reference:type=MongodbCluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MongodbCluster -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -// Reference to a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type MongodbDatabaseObservation struct { + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database.
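The Note(lsviben) comment is the only non-obvious logic in these generated accessors, so a standalone illustration may help reviewers. This is a minimal sketch, not provider code; it assumes nothing beyond dario.cat/mergo itself, and the maps and values are invented:

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// Invented stand-ins for the forProvider and initProvider documents.
	params := map[string]any{"name": "appdb"}
	initParams := map[string]any{"name": "ignored", "clusterId": "c9qabcdef"}

	// WithSliceDeepCopy also turns Overwrite on, so the extra transformer
	// switches it back off: keys already set in params are preserved, and
	// only the missing clusterId is copied over from initParams.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}
	fmt.Println(params) // map[clusterId:c9qabcdef name:appdb]
}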
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type MongodbDatabaseParameters struct { + // +crossplane:generate:reference:type=MongodbCluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MongodbCluster -// +kubebuilder:validation:Optional -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -// Reference to a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// The name of the database. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // MongodbDatabaseSpec defines the desired state of MongodbDatabase type MongodbDatabaseSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider MongodbDatabaseParameters `json:"forProvider"` + ForProvider MongodbDatabaseParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -80,20 +68,19 @@ type MongodbDatabaseSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider MongodbDatabaseInitParameters `json:"initProvider,omitempty"` + InitProvider MongodbDatabaseInitParameters `json:"initProvider,omitempty"` } // MongodbDatabaseStatus defines the observed state of MongodbDatabase. type MongodbDatabaseStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider MongodbDatabaseObservation `json:"atProvider,omitempty"` + AtProvider MongodbDatabaseObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // MongodbDatabase is the Schema for the MongodbDatabases API. Manages a MongoDB database within Yandex.Cloud.
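For readers unfamiliar with the clusterId/clusterIdRef/clusterIdSelector triple in the parameters above: normally only one of the three is set, and a reference or selector is resolved to the target cluster's external name at runtime. A hedged sketch of constructing such an object in Go (the object and cluster names are invented for illustration):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	"github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
)

func strPtr(s string) *string { return &s }

// exampleDatabase leaves clusterId nil; the controller fills it in from
// the referenced MongodbCluster once that cluster is ready.
var exampleDatabase = &v1alpha1.MongodbDatabase{
	ObjectMeta: metav1.ObjectMeta{Name: "appdb"},
	Spec: v1alpha1.MongodbDatabaseSpec{
		ForProvider: v1alpha1.MongodbDatabaseParameters{
			Name:         strPtr("appdb"),
			ClusterIDRef: &v1.Reference{Name: "my-mongodb-cluster"},
		},
	},
}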
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -103,9 +90,9 @@ type MongodbDatabaseStatus struct { type MongodbDatabase struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec MongodbDatabaseSpec `json:"spec"` - Status MongodbDatabaseStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MongodbDatabaseSpec `json:"spec"` + Status MongodbDatabaseStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mongodbuser_terraformed.go b/apis/mdb/v1alpha1/zz_mongodbuser_terraformed.go new file mode 100755 index 0000000..922f436 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_mongodbuser_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MongodbUser +func (mg *MongodbUser) GetTerraformResourceType() string { + return "yandex_mdb_mongodb_user" +} + +// GetConnectionDetailsMapping for this MongodbUser +func (tr *MongodbUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "passwordSecretRef"} +} + +// GetObservation of this MongodbUser +func (tr *MongodbUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MongodbUser +func (tr *MongodbUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MongodbUser +func (tr *MongodbUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MongodbUser +func (tr *MongodbUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MongodbUser +func (tr *MongodbUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MongodbUser +func (tr *MongodbUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this MongodbUser +func (tr *MongodbUser) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, as we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MongodbUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MongodbUser) LateInitialize(attrs []byte) (bool, error) { + params := &MongodbUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MongodbUser) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_mongodbuser_types.go b/apis/mdb/v1alpha1/zz_mongodbuser_types.go index 2688ede..fd10406 100755 --- a/apis/mdb/v1alpha1/zz_mongodbuser_types.go +++ b/apis/mdb/v1alpha1/zz_mongodbuser_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,122 +7,106 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type MongodbUserInitParameters struct { + // +crossplane:generate:reference:type=MongodbCluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MongodbCluster -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -// Reference to a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// The name of the user. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the user.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The password of the user. -PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + // The password of the user. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` -// Set of permissions granted to the user. The structure is documented below. -Permission []MongodbUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"` + // Set of permissions granted to the user. The structure is documented below. + Permission []MongodbUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"` } - type MongodbUserObservation struct { + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// The name of the user. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Set of permissions granted to the user. The structure is documented below. -Permission []MongodbUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` + // Set of permissions granted to the user. The structure is documented below. + Permission []MongodbUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` } - type MongodbUserParameters struct { + // +crossplane:generate:reference:type=MongodbCluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MongodbCluster -// +kubebuilder:validation:Optional -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -// Reference to a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MongodbCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MongodbCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// The name of the user. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the user. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The password of the user. -// +kubebuilder:validation:Optional -PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + // The password of the user. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` -// Set of permissions granted to the user. The structure is documented below. -// +kubebuilder:validation:Optional -Permission []MongodbUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"` + // Set of permissions granted to the user. The structure is documented below. 
+ // +kubebuilder:validation:Optional + Permission []MongodbUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"` } - type MongodbUserPermissionInitParameters struct { + // The name of the database that the permission grants access to. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` - -// List of strings. The roles of the user in this database. For more information see the official documentation. -// +listType=set -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List of strings. The roles of the user in this database. For more information, see the official documentation. + // +listType=set + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } - type MongodbUserPermissionObservation struct { + // The name of the database that the permission grants access to. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` - -// List of strings. The roles of the user in this database. For more information see the official documentation. -// +listType=set -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List of strings. The roles of the user in this database. For more information, see the official documentation. + // +listType=set + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } - type MongodbUserPermissionParameters struct { + // The name of the database that the permission grants access to. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -// +kubebuilder:validation:Optional -DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` - -// List of strings. The roles of the user in this database. For more information see the official documentation. -// +kubebuilder:validation:Optional -// +listType=set -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List of strings. The roles of the user in this database. For more information, see the official documentation. + // +kubebuilder:validation:Optional + // +listType=set + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } // MongodbUserSpec defines the desired state of MongodbUser type MongodbUserSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider MongodbUserParameters `json:"forProvider"` + ForProvider MongodbUserParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -135,20 +117,19 @@ type MongodbUserSpec struct { // required on creation, but we do not desire to update them after creation, // for example because an external controller is managing them, like an // autoscaler. - InitProvider MongodbUserInitParameters `json:"initProvider,omitempty"` + InitProvider MongodbUserInitParameters `json:"initProvider,omitempty"` } // MongodbUserStatus defines the observed state of MongodbUser.
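A short, hypothetical fragment showing how the permission block above composes: each entry names a database and the MongoDB roles granted there. The database and role names are invented; valid roles are listed in the linked documentation:

package example

import "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"

func strPtr(s string) *string { return &s }

// Invented example: grant readWrite on "appdb" and read on "reporting".
var examplePermissions = []v1alpha1.MongodbUserPermissionParameters{
	{DatabaseName: strPtr("appdb"), Roles: []*string{strPtr("readWrite")}},
	{DatabaseName: strPtr("reporting"), Roles: []*string{strPtr("read")}},
}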
type MongodbUserStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider MongodbUserObservation `json:"atProvider,omitempty"` + AtProvider MongodbUserObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // MongodbUser is the Schema for the MongodbUsers API. Manages a MongoDB user within Yandex.Cloud. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -158,10 +139,10 @@ type MongodbUserStatus struct { type MongodbUser struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" - Spec MongodbUserSpec `json:"spec"` - Status MongodbUserStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" + Spec MongodbUserSpec `json:"spec"` + Status MongodbUserStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mysqlcluster_terraformed.go b/apis/mdb/v1alpha1/zz_mysqlcluster_terraformed.go new file mode 100755 index 0000000..a8a97cf --- /dev/null +++ b/apis/mdb/v1alpha1/zz_mysqlcluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MySQLCluster +func (mg *MySQLCluster) GetTerraformResourceType() string { + return "yandex_mdb_mysql_cluster" +} + +// GetConnectionDetailsMapping for this MySQLCluster +func (tr *MySQLCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"user[*].password": "user[*].passwordSecretRef"} +} + +// GetObservation of this MySQLCluster +func (tr *MySQLCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MySQLCluster +func (tr *MySQLCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MySQLCluster +func (tr *MySQLCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MySQLCluster +func (tr *MySQLCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MySQLCluster +func (tr *MySQLCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MySQLCluster +func (tr *MySQLCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this MySQLCluster +func (tr *MySQLCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, as we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MySQLCluster using its observed tfState. +// returns True if there are any spec changes for the resource.
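LateInitialize is invoked by the upjet runtime rather than by user code, but the contract is worth spelling out: attrs is the observed tfState attributes JSON, unset spec.forProvider fields are back-filled from it, and the returned bool reports whether the spec changed. A hypothetical caller (the helper and the JSON fragment are invented):

package example

import "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"

// lateInitFromState is an invented helper: it back-fills cluster's
// spec.forProvider from observed state and reports whether the spec
// changed and therefore needs to be persisted.
func lateInitFromState(cluster *v1alpha1.MySQLCluster) (bool, error) {
	attrs := []byte(`{"version":"8.0","deletion_protection":false}`) // invented tfState fragment
	return cluster.LateInitialize(attrs)
}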
+func (tr *MySQLCluster) LateInitialize(attrs []byte) (bool, error) { + params := &MySQLClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MySQLCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_mysqlcluster_types.go b/apis/mdb/v1alpha1/zz_mysqlcluster_types.go index 55e4daa..f49b7bb 100755 --- a/apis/mdb/v1alpha1/zz_mysqlcluster_types.go +++ b/apis/mdb/v1alpha1/zz_mysqlcluster_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,873 +7,798 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ConnectionLimitsInitParameters struct { + // Max connections per hour. + MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` -// Max connections per hour. -MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` + // Max questions per hour. + MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` -// Max questions per hour. -MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` + // Max updates per hour. + MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` -// Max updates per hour. -MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` - -// Max user connections. -MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` + // Max user connections. + MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` } - type ConnectionLimitsObservation struct { + // Max connections per hour. + MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` -// Max connections per hour. -MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` - -// Max questions per hour. -MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` + // Max questions per hour. + MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` -// Max updates per hour. -MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` + // Max updates per hour. + MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` -// Max user connections. -MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` + // Max user connections. + MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` } - type ConnectionLimitsParameters struct { + // Max connections per hour. 
+ // +kubebuilder:validation:Optional + MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` -// Max connections per hour. -// +kubebuilder:validation:Optional -MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` - -// Max questions per hour. -// +kubebuilder:validation:Optional -MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` + // Max questions per hour. + // +kubebuilder:validation:Optional + MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` -// Max updates per hour. -// +kubebuilder:validation:Optional -MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` + // Max updates per hour. + // +kubebuilder:validation:Optional + MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` -// Max user connections. -// +kubebuilder:validation:Optional -MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` + // Max user connections. + // +kubebuilder:validation:Optional + MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` } - type MySQLClusterAccessInitParameters struct { + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` - -// Allow access for DataTransfer -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allows access for SQL queries in the management console. -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allows access for SQL queries in the management console. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` } - type MySQLClusterAccessObservation struct { + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allow access for DataTransfer -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` - -// Allows access for SQL queries in the management console. -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allows access for SQL queries in the management console. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` } - type MySQLClusterAccessParameters struct { + // Allow access for Yandex DataLens. + // +kubebuilder:validation:Optional + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` -// Allow access for Yandex DataLens. -// +kubebuilder:validation:Optional -DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + // Allow access for DataTransfer + // +kubebuilder:validation:Optional + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` -// Allow access for DataTransfer -// +kubebuilder:validation:Optional -DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` - -// Allows access for SQL queries in the management console. 
-// +kubebuilder:validation:Optional -WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` + // Allows access for SQL queries in the management console. + // +kubebuilder:validation:Optional + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` } - type MySQLClusterBackupWindowStartInitParameters struct { + // The hour at which backup will be started. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type MySQLClusterBackupWindowStartObservation struct { + // The hour at which backup will be started. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type MySQLClusterBackupWindowStartParameters struct { + // The hour at which backup will be started. + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` -// The hour at which backup will be started. -// +kubebuilder:validation:Optional -Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` - -// The minute at which backup will be started. -// +kubebuilder:validation:Optional -Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + // The minute at which backup will be started. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` } - type MySQLClusterDatabaseInitParameters struct { - -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type MySQLClusterDatabaseObservation struct { - -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type MySQLClusterDatabaseParameters struct { - -// The name of the database. -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` } - type MySQLClusterHostInitParameters struct { + // Sets whether the host should get a public IP address. It can be changed on the fly only when name is set. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// Sets whether the host should get a public IP address. It can be changed on the fly only when name is set. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` - -// Host backup priority. Value is between 0 and 100, default is 0. -BackupPriority *float64 `json:"backupPriority,omitempty" tf:"backup_priority,omitempty"` + // Host backup priority. Value is between 0 and 100, default is 0. + BackupPriority *float64 `json:"backupPriority,omitempty" tf:"backup_priority,omitempty"` -// Host state name. It should be set for all hosts or unset for all hosts. 
This field can be used by another host, to select which host will be its replication source. Please refer to replication_source_name parameter. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host to select which host will be its replication source. Please refer to replication_source_name parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Host master promotion priority. Value is between 0 and 100, default is 0. -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Host master promotion priority. Value is between 0 and 100, default is 0. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. -ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` + // Host replication source name points to the name of the host from which this host should replicate. When not set, the host is in the HA group. It works only when name is set. + ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` -// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// The availability zone where the MySQL host will be created. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // The availability zone where the MySQL host will be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type MySQLClusterHostObservation struct { + // Sets whether the host should get a public IP address. It can be changed on the fly only when name is set. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// Sets whether the host should get a public IP address. It can be changed on the fly only when name is set. -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` - -// Host backup priority. Value is between 0 and 100, default is 0. + // Host backup priority. Value is between 0 and 100, default is 0.
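The name/replication_source_name contract described above is easiest to see as a concrete topology. A hypothetical three-host layout (names and zones invented): hostA and hostB leave replicationSourceName unset and form the HA group, while hostC cascades from hostB:

package example

import "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"

func strPtr(s string) *string { return &s }

// Names are set on all hosts, as the field comment requires.
var exampleHosts = []v1alpha1.MySQLClusterHostInitParameters{
	{Name: strPtr("hostA"), Zone: strPtr("ru-central1-a")},
	{Name: strPtr("hostB"), Zone: strPtr("ru-central1-b")},
	{Name: strPtr("hostC"), Zone: strPtr("ru-central1-d"), ReplicationSourceName: strPtr("hostB")},
}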
+ BackupPriority *float64 `json:"backupPriority,omitempty" tf:"backup_priority,omitempty"` -// (Computed) The fully qualified domain name of the host. -Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + // (Computed) The fully qualified domain name of the host. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` -// Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host, to select which host will be its replication source. Please refer to replication_source_name parameter. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host to select which host will be its replication source. Please refer to replication_source_name parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Host master promotion priority. Value is between 0 and 100, default is 0. -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Host master promotion priority. Value is between 0 and 100, default is 0. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// (Computed) Host replication source (fqdn), when replication_source is empty then host is in HA group. -ReplicationSource *string `json:"replicationSource,omitempty" tf:"replication_source,omitempty"` + // (Computed) Host replication source (fqdn); when replication_source is empty, the host is in the HA group. + ReplicationSource *string `json:"replicationSource,omitempty" tf:"replication_source,omitempty"` -// Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. -ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` + // Host replication source name points to the name of the host from which this host should replicate. When not set, the host is in the HA group. It works only when name is set. + ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` -// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // The ID of the subnet to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The availability zone where the MySQL host will be created. -Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` + // The availability zone where the MySQL host will be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` } - type MySQLClusterHostParameters struct { + // Sets whether the host should get a public IP address. It can be changed on the fly only when name is set. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` -// Sets whether the host should get a public IP address. It can be changed on the fly only when name is set. -// +kubebuilder:validation:Optional -AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + // Host backup priority. Value is between 0 and 100, default is 0. + // +kubebuilder:validation:Optional + BackupPriority *float64 `json:"backupPriority,omitempty" tf:"backup_priority,omitempty"` -// Host backup priority.
Value is between 0 and 100, default is 0. -// +kubebuilder:validation:Optional -BackupPriority *float64 `json:"backupPriority,omitempty" tf:"backup_priority,omitempty"` + // Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host, to select which host will be its replication source. Please refer to replication_source_name parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host, to select which host will be its replication source. Please refer to replication_source_name parameter. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Host master promotion priority. Value is between 0 and 100, default is 0. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// Host master promotion priority. Value is between 0 and 100, default is 0. -// +kubebuilder:validation:Optional -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. + // +kubebuilder:validation:Optional + ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` -// Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. -// +kubebuilder:validation:Optional -ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` -// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet -// +kubebuilder:validation:Optional -SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` -// Reference to a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` -// Selector for a Subnet in vpc to populate subnetId. -// +kubebuilder:validation:Optional -SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` - -// The availability zone where the MySQL host will be created. -// +kubebuilder:validation:Optional -Zone *string `json:"zone" tf:"zone,omitempty"` + // The availability zone where the MySQL host will be created. + // +kubebuilder:validation:Optional + Zone *string `json:"zone" tf:"zone,omitempty"` } - type MySQLClusterInitParameters struct { + // Access policy to the MySQL cluster. The structure is documented below. 
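// An illustrative sketch, not part of the generated file: one way the host
// block above might be populated in Go. All values are invented, and
// "example-subnet" is a hypothetical Subnet object name that the reference
// machinery would resolve into subnetId.
func exampleHostParameters() MySQLClusterHostParameters {
	zone := "ru-central1-a"
	priority := float64(10) // master promotion priority, 0..100
	return MySQLClusterHostParameters{
		Zone:        &zone, // required within the block: its json tag has no omitempty
		Priority:    &priority,
		SubnetIDRef: &v1.Reference{Name: "example-subnet"}, // hypothetical object name
	}
}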
-
 type MySQLClusterInitParameters struct {
+	// Access policy to the MySQL cluster. The structure is documented below.
+	Access []MySQLClusterAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"`
-// Access policy to the MySQL cluster. The structure is documented below.
-Access []MySQLClusterAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"`
+	// A host of the MySQL cluster. The structure is documented below.
+	AllowRegenerationHost *bool `json:"allowRegenerationHost,omitempty" tf:"allow_regeneration_host,omitempty"`
-// A host of the MySQL cluster. The structure is documented below.
-AllowRegenerationHost *bool `json:"allowRegenerationHost,omitempty" tf:"allow_regeneration_host,omitempty"`
+	// The period in days during which backups are stored.
+	BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
-// The period in days during which backups are stored.
-BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+	// Time to start the daily backup, in UTC. The structure is documented below.
+	BackupWindowStart []MySQLClusterBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
-// Time to start the daily backup, in the UTC. The structure is documented below.
-BackupWindowStart []MySQLClusterBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+	// (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_mysql_databases.
+	Database []MySQLClusterDatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"`
-// (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_mysql_databases.
-Database []MySQLClusterDatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
-// Inhibits deletion of the cluster. Can be either true or false.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Description of the MySQL cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the MySQL cluster.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Deployment environment of the MySQL cluster.
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
-// Deployment environment of the MySQL cluster.
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// A host of the MySQL cluster. The structure is documented below.
+	Host []MySQLClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"`
-// A host of the MySQL cluster. The structure is documented below.
-Host []MySQLClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"`
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+	// A set of key/value label pairs to assign to the MySQL cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// A set of key/value label pairs to assign to the MySQL cluster.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Maintenance policy of the MySQL cluster. The structure is documented below.
+	MaintenanceWindow []MySQLClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
-// Maintenance policy of the MySQL cluster. The structure is documented below.
-MaintenanceWindow []MySQLClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+	// MySQL cluster config. Detail info in "MySQL config" section (documented below).
+	// +mapType=granular
+	MySQLConfig map[string]*string `json:"mysqlConfig,omitempty" tf:"mysql_config,omitempty"`
-// MySQL cluster config. Detail info in "MySQL config" section (documented below).
-// +mapType=granular
-MySQLConfig map[string]*string `json:"mysqlConfig,omitempty" tf:"mysql_config,omitempty"`
+	// Name of the MySQL cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Name of the MySQL cluster. Provided by the client when the cluster is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// ID of the network to which the MySQL cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
-// ID of the network, to which the MySQL cluster uses.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
-// Reference to a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
-// Selector for a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+	// Cluster performance diagnostics settings. The structure is documented below. YC Documentation
+	PerformanceDiagnostics []MySQLClusterPerformanceDiagnosticsInitParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
-// Cluster performance diagnostics settings. The structure is documented below. YC Documentation
-PerformanceDiagnostics []MySQLClusterPerformanceDiagnosticsInitParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
+	// Resources allocated to hosts of the MySQL cluster. The structure is documented below.
+	Resources []MySQLClusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-// Resources allocated to hosts of the MySQL cluster. The structure is documented below.
-Resources []MySQLClusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// The cluster will be created from the specified backup. The structure is documented below.
+	Restore []MySQLClusterRestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"`
-// The cluster will be created from the specified backup. The structure is documented below.
-Restore []MySQLClusterRestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"`
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-// A set of ids of security groups assigned to hosts of the cluster.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
-// References to SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+	// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_mysql_user.
+	User []MySQLClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
-// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_mysql_user.
-User []MySQLClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
-
-// Version of the MySQL cluster. (allowed versions are: 5.7, 8.0)
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// Version of the MySQL cluster. (allowed versions are: 5.7, 8.0)
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
 }
-
 type MySQLClusterMaintenanceWindowInitParameters struct {
+	// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
-// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
-
-// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23.
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
+	// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23.
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 type MySQLClusterMaintenanceWindowObservation struct {
+	// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
-// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
-
-// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23.
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
+	// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23.
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-Type *string `json:"type,omitempty" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
 }
-
 type MySQLClusterMaintenanceWindowParameters struct {
+	// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
+	// +kubebuilder:validation:Optional
+	Day *string `json:"day,omitempty" tf:"day,omitempty"`
-// Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-// +kubebuilder:validation:Optional
-Day *string `json:"day,omitempty" tf:"day,omitempty"`
-
-// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23.
-// +kubebuilder:validation:Optional
-Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
+	// Hour of the day in UTC (in HH format). Allowed value is between 0 and 23.
+	// +kubebuilder:validation:Optional
+	Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"`
-// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
-// +kubebuilder:validation:Optional
-Type *string `json:"type" tf:"type,omitempty"`
+	// Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
 }
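// An illustrative sketch, not part of the generated file: building a WEEKLY
// maintenance window from the parameters above. For an ANYTIME window, Day
// and Hour would be left nil. The function name and values are invented.
func exampleMaintenanceWindow() MySQLClusterMaintenanceWindowParameters {
	windowType, day, hour := "WEEKLY", "MON", float64(2) // 02:00 UTC
	return MySQLClusterMaintenanceWindowParameters{
		Type: &windowType, // json:"type" has no omitempty, so it is required within the block
		Day:  &day,
		Hour: &hour,
	}
}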
-
 type MySQLClusterObservation struct {
+	// Access policy to the MySQL cluster. The structure is documented below.
+	Access []MySQLClusterAccessObservation `json:"access,omitempty" tf:"access,omitempty"`
-// Access policy to the MySQL cluster. The structure is documented below.
-Access []MySQLClusterAccessObservation `json:"access,omitempty" tf:"access,omitempty"`
+	// A host of the MySQL cluster. The structure is documented below.
+	AllowRegenerationHost *bool `json:"allowRegenerationHost,omitempty" tf:"allow_regeneration_host,omitempty"`
-// A host of the MySQL cluster. The structure is documented below.
-AllowRegenerationHost *bool `json:"allowRegenerationHost,omitempty" tf:"allow_regeneration_host,omitempty"`
+	// The period in days during which backups are stored.
+	BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
-// The period in days during which backups are stored.
-BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+	// Time to start the daily backup, in UTC. The structure is documented below.
+	BackupWindowStart []MySQLClusterBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
-// Time to start the daily backup, in the UTC. The structure is documented below.
-BackupWindowStart []MySQLClusterBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+	// Creation timestamp of the cluster.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
-// Creation timestamp of the cluster.
-CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+	// (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_mysql_databases.
+	Database []MySQLClusterDatabaseObservation `json:"database,omitempty" tf:"database,omitempty"`
-// (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_mysql_databases.
-Database []MySQLClusterDatabaseObservation `json:"database,omitempty" tf:"database,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
-// Inhibits deletion of the cluster. Can be either true or false.
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Description of the MySQL cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Description of the MySQL cluster.
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Deployment environment of the MySQL cluster.
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
-// Deployment environment of the MySQL cluster.
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// Aggregated health of the cluster.
+	Health *string `json:"health,omitempty" tf:"health,omitempty"`
-// Aggregated health of the cluster.
-Health *string `json:"health,omitempty" tf:"health,omitempty"`
+	// A host of the MySQL cluster. The structure is documented below.
+	Host []MySQLClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"`
-// A host of the MySQL cluster. The structure is documented below.
-Host []MySQLClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"`
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
-ID *string `json:"id,omitempty" tf:"id,omitempty"`
+	// A set of key/value label pairs to assign to the MySQL cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// A set of key/value label pairs to assign to the MySQL cluster.
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// Maintenance policy of the MySQL cluster. The structure is documented below.
+	MaintenanceWindow []MySQLClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
-// Maintenance policy of the MySQL cluster. The structure is documented below.
-MaintenanceWindow []MySQLClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+	// MySQL cluster config. Detail info in "MySQL config" section (documented below).
+	// +mapType=granular
+	MySQLConfig map[string]*string `json:"mysqlConfig,omitempty" tf:"mysql_config,omitempty"`
-// MySQL cluster config. Detail info in "MySQL config" section (documented below).
-// +mapType=granular
-MySQLConfig map[string]*string `json:"mysqlConfig,omitempty" tf:"mysql_config,omitempty"`
+	// Name of the MySQL cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// Name of the MySQL cluster. Provided by the client when the cluster is created.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// ID of the network to which the MySQL cluster belongs.
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
-// ID of the network, to which the MySQL cluster uses.
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+	// Cluster performance diagnostics settings. The structure is documented below. YC Documentation
+	PerformanceDiagnostics []MySQLClusterPerformanceDiagnosticsObservation `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
-// Cluster performance diagnostics settings. The structure is documented below. YC Documentation
-PerformanceDiagnostics []MySQLClusterPerformanceDiagnosticsObservation `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
+	// Resources allocated to hosts of the MySQL cluster. The structure is documented below.
+	Resources []MySQLClusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
-// Resources allocated to hosts of the MySQL cluster. The structure is documented below.
-Resources []MySQLClusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+	// The cluster will be created from the specified backup. The structure is documented below.
+	Restore []MySQLClusterRestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"`
-// The cluster will be created from the specified backup. The structure is documented below.
-Restore []MySQLClusterRestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"`
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-// A set of ids of security groups assigned to hosts of the cluster.
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+	// Status of the cluster.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
-// Status of the cluster.
-Status *string `json:"status,omitempty" tf:"status,omitempty"`
+	// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_mysql_user.
+	User []MySQLClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"`
-// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_mysql_user.
-User []MySQLClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"`
-
-// Version of the MySQL cluster. (allowed versions are: 5.7, 8.0)
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// Version of the MySQL cluster. (allowed versions are: 5.7, 8.0)
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
 }
-
 type MySQLClusterParameters struct {
+	// Access policy to the MySQL cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Access []MySQLClusterAccessParameters `json:"access,omitempty" tf:"access,omitempty"`
+
+	// A host of the MySQL cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	AllowRegenerationHost *bool `json:"allowRegenerationHost,omitempty" tf:"allow_regeneration_host,omitempty"`
+
+	// The period in days during which backups are stored.
+	// +kubebuilder:validation:Optional
+	BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+
+	// Time to start the daily backup, in UTC. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	BackupWindowStart []MySQLClusterBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+
+	// (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_mysql_databases.
+	// +kubebuilder:validation:Optional
+	Database []MySQLClusterDatabaseParameters `json:"database,omitempty" tf:"database,omitempty"`
-// Access policy to the MySQL cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Access []MySQLClusterAccessParameters `json:"access,omitempty" tf:"access,omitempty"`
-
-// A host of the MySQL cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-AllowRegenerationHost *bool `json:"allowRegenerationHost,omitempty" tf:"allow_regeneration_host,omitempty"`
+	// Inhibits deletion of the cluster. Can be either true or false.
+	// +kubebuilder:validation:Optional
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
-// The period in days during which backups are stored.
-// +kubebuilder:validation:Optional
-BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"`
+	// Description of the MySQL cluster.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
-// Time to start the daily backup, in the UTC. The structure is documented below.
-// +kubebuilder:validation:Optional
-BackupWindowStart []MySQLClusterBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+	// Deployment environment of the MySQL cluster.
+	// +kubebuilder:validation:Optional
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
-// (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_mysql_databases.
-// +kubebuilder:validation:Optional
-Database []MySQLClusterDatabaseParameters `json:"database,omitempty" tf:"database,omitempty"`
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
-// Inhibits deletion of the cluster. Can be either true or false.
-// +kubebuilder:validation:Optional
-DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
-// Description of the MySQL cluster.
-// +kubebuilder:validation:Optional
-Description *string `json:"description,omitempty" tf:"description,omitempty"`
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
-// Deployment environment of the MySQL cluster.
-// +kubebuilder:validation:Optional
-Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+	// A host of the MySQL cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Host []MySQLClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"`
-// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
-// +kubebuilder:validation:Optional
-FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
-// Reference to a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+	// A set of key/value label pairs to assign to the MySQL cluster.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
-// Selector for a Folder in resourcemanager to populate folderId.
-// +kubebuilder:validation:Optional
-FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+	// Maintenance policy of the MySQL cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	MaintenanceWindow []MySQLClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
-// A host of the MySQL cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Host []MySQLClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"`
+	// MySQL cluster config. Detail info in "MySQL config" section (documented below).
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	MySQLConfig map[string]*string `json:"mysqlConfig,omitempty" tf:"mysql_config,omitempty"`
-// +kubebuilder:validation:Optional
-// +listType=set
-HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+	// Name of the MySQL cluster. Provided by the client when the cluster is created.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// A set of key/value label pairs to assign to the MySQL cluster.
-// +kubebuilder:validation:Optional
-// +mapType=granular
-Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+	// ID of the network to which the MySQL cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	// +kubebuilder:validation:Optional
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
-// Maintenance policy of the MySQL cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-MaintenanceWindow []MySQLClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"`
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
-// MySQL cluster config. Detail info in "MySQL config" section (documented below).
-// +kubebuilder:validation:Optional
-// +mapType=granular
-MySQLConfig map[string]*string `json:"mysqlConfig,omitempty" tf:"mysql_config,omitempty"`
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
-// Name of the MySQL cluster. Provided by the client when the cluster is created.
-// +kubebuilder:validation:Optional
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// Cluster performance diagnostics settings. The structure is documented below. YC Documentation
+	// +kubebuilder:validation:Optional
+	PerformanceDiagnostics []MySQLClusterPerformanceDiagnosticsParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
-// ID of the network, to which the MySQL cluster uses.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
-// +kubebuilder:validation:Optional
-NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+	// Resources allocated to hosts of the MySQL cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Resources []MySQLClusterResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
-// Reference to a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+	// The cluster will be created from the specified backup. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Restore []MySQLClusterRestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"`
-// Selector for a Network in vpc to populate networkId.
-// +kubebuilder:validation:Optional
-NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-// Cluster performance diagnostics settings. The structure is documented below. YC Documentation
-// +kubebuilder:validation:Optional
-PerformanceDiagnostics []MySQLClusterPerformanceDiagnosticsParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"`
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
-// Resources allocated to hosts of the MySQL cluster. The structure is documented below.
-// +kubebuilder:validation:Optional
-Resources []MySQLClusterResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-// The cluster will be created from the specified backup. The structure is documented below.
-// +kubebuilder:validation:Optional
-Restore []MySQLClusterRestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"`
+	// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_mysql_user.
+	// +kubebuilder:validation:Optional
+	User []MySQLClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
-// A set of ids of security groups assigned to hosts of the cluster.
-// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
-// +kubebuilder:validation:Optional
-// +listType=set
-SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
-
-// References to SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
-
-// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
-// +kubebuilder:validation:Optional
-SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
-
-// (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_mysql_user.
-// +kubebuilder:validation:Optional
-User []MySQLClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
-
-// Version of the MySQL cluster. (allowed versions are: 5.7, 8.0)
-// +kubebuilder:validation:Optional
-Version *string `json:"version,omitempty" tf:"version,omitempty"`
+	// Version of the MySQL cluster. (allowed versions are: 5.7, 8.0)
+	// +kubebuilder:validation:Optional
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
 }
-
 type MySQLClusterPerformanceDiagnosticsInitParameters struct {
+	// Enable performance diagnostics
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// Enable performance diagnostics
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+	// Interval (in seconds) for my_stat_activity sampling. Acceptable values are 1 to 86400, inclusive.
+	SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval,omitempty" tf:"sessions_sampling_interval,omitempty"`
-// Interval (in seconds) for my_stat_activity sampling Acceptable values are 1 to 86400, inclusive.
-SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval,omitempty" tf:"sessions_sampling_interval,omitempty"`
-
-// Interval (in seconds) for my_stat_statements sampling Acceptable values are 1 to 86400, inclusive.
-StatementsSamplingInterval *float64 `json:"statementsSamplingInterval,omitempty" tf:"statements_sampling_interval,omitempty"`
+	// Interval (in seconds) for my_stat_statements sampling. Acceptable values are 1 to 86400, inclusive.
+	StatementsSamplingInterval *float64 `json:"statementsSamplingInterval,omitempty" tf:"statements_sampling_interval,omitempty"`
 }
-
 type MySQLClusterPerformanceDiagnosticsObservation struct {
+	// Enable performance diagnostics
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// Enable performance diagnostics
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// Interval (in seconds) for my_stat_activity sampling Acceptable values are 1 to 86400, inclusive.
-SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval,omitempty" tf:"sessions_sampling_interval,omitempty"`
+	// Interval (in seconds) for my_stat_activity sampling. Acceptable values are 1 to 86400, inclusive.
+	SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval,omitempty" tf:"sessions_sampling_interval,omitempty"`
-// Interval (in seconds) for my_stat_statements sampling Acceptable values are 1 to 86400, inclusive.
-StatementsSamplingInterval *float64 `json:"statementsSamplingInterval,omitempty" tf:"statements_sampling_interval,omitempty"`
+	// Interval (in seconds) for my_stat_statements sampling. Acceptable values are 1 to 86400, inclusive.
+	StatementsSamplingInterval *float64 `json:"statementsSamplingInterval,omitempty" tf:"statements_sampling_interval,omitempty"`
 }
-
 type MySQLClusterPerformanceDiagnosticsParameters struct {
+	// Enable performance diagnostics
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-// Enable performance diagnostics
-// +kubebuilder:validation:Optional
-Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
-
-// Interval (in seconds) for my_stat_activity sampling Acceptable values are 1 to 86400, inclusive.
-// +kubebuilder:validation:Optional
-SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval" tf:"sessions_sampling_interval,omitempty"`
+	// Interval (in seconds) for my_stat_activity sampling. Acceptable values are 1 to 86400, inclusive.
+	// +kubebuilder:validation:Optional
+	SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval" tf:"sessions_sampling_interval,omitempty"`
-// Interval (in seconds) for my_stat_statements sampling Acceptable values are 1 to 86400, inclusive.
-// +kubebuilder:validation:Optional
-StatementsSamplingInterval *float64 `json:"statementsSamplingInterval" tf:"statements_sampling_interval,omitempty"`
+	// Interval (in seconds) for my_stat_statements sampling. Acceptable values are 1 to 86400, inclusive.
+	// +kubebuilder:validation:Optional
+	StatementsSamplingInterval *float64 `json:"statementsSamplingInterval" tf:"statements_sampling_interval,omitempty"`
 }
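// An illustrative sketch, not part of the generated file: enabling
// performance diagnostics with both sampling intervals, which are required
// within the block (their json tags lack omitempty) and accept 1..86400
// seconds. The function name and values are invented.
func examplePerformanceDiagnostics() MySQLClusterPerformanceDiagnosticsParameters {
	enabled := true
	sessions, statements := float64(60), float64(600)
	return MySQLClusterPerformanceDiagnosticsParameters{
		Enabled:                    &enabled,
		SessionsSamplingInterval:   &sessions,
		StatementsSamplingInterval: &statements,
	}
}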
-
 type MySQLClusterResourcesInitParameters struct {
+	// Volume of the storage available to a MySQL host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MySQL host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-
-// Type of the storage of MySQL hosts.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+	// Type of the storage of MySQL hosts.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
 }
-
 type MySQLClusterResourcesObservation struct {
+	// Volume of the storage available to a MySQL host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MySQL host, in gigabytes.
-DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+	// Type of the storage of MySQL hosts.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-// Type of the storage of MySQL hosts.
-DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
-
-ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
 }
-
 type MySQLClusterResourcesParameters struct {
+	// Volume of the storage available to a MySQL host, in gigabytes.
+	// +kubebuilder:validation:Optional
+	DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
-// Volume of the storage available to a MySQL host, in gigabytes.
-// +kubebuilder:validation:Optional
-DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
+	// Type of the storage of MySQL hosts.
+	// +kubebuilder:validation:Optional
+	DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-// Type of the storage of MySQL hosts.
-// +kubebuilder:validation:Optional
-DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
-
-// +kubebuilder:validation:Optional
-ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+	// +kubebuilder:validation:Optional
+	ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
 }
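// An illustrative sketch, not part of the generated file: all three resource
// fields are required within the block (their json tags lack omitempty). The
// disk type and preset IDs are placeholders; valid values come from the
// Yandex Cloud MDB documentation.
func exampleResources() MySQLClusterResourcesParameters {
	diskSize := float64(20) // gigabytes
	diskType, preset := "network-ssd", "s2.micro"
	return MySQLClusterResourcesParameters{
		DiskSize:         &diskSize,
		DiskTypeID:       &diskType,
		ResourcePresetID: &preset,
	}
}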
-
 type MySQLClusterRestoreInitParameters struct {
+	// Backup ID. The cluster will be created from the specified backup. See the Yandex Cloud documentation for how to get a list of MySQL backups.
+	BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-// Backup ID. The cluster will be created from the specified backup. How to get a list of MySQL backups.
-BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-
-// Timestamp of the moment to which the MySQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
-Time *string `json:"time,omitempty" tf:"time,omitempty"`
+	// Timestamp of the moment to which the MySQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
+	Time *string `json:"time,omitempty" tf:"time,omitempty"`
 }
-
 type MySQLClusterRestoreObservation struct {
+	// Backup ID. The cluster will be created from the specified backup. See the Yandex Cloud documentation for how to get a list of MySQL backups.
+	BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-// Backup ID. The cluster will be created from the specified backup. How to get a list of MySQL backups.
-BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"`
-
-// Timestamp of the moment to which the MySQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
-Time *string `json:"time,omitempty" tf:"time,omitempty"`
+	// Timestamp of the moment to which the MySQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
+	Time *string `json:"time,omitempty" tf:"time,omitempty"`
 }
-
 type MySQLClusterRestoreParameters struct {
+	// Backup ID. The cluster will be created from the specified backup. See the Yandex Cloud documentation for how to get a list of MySQL backups.
+	// +kubebuilder:validation:Optional
+	BackupID *string `json:"backupId" tf:"backup_id,omitempty"`
-// Backup ID. The cluster will be created from the specified backup. How to get a list of MySQL backups.
-// +kubebuilder:validation:Optional
-BackupID *string `json:"backupId" tf:"backup_id,omitempty"`
-
-// Timestamp of the moment to which the MySQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
-// +kubebuilder:validation:Optional
-Time *string `json:"time,omitempty" tf:"time,omitempty"`
+	// Timestamp of the moment to which the MySQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used.
+	// +kubebuilder:validation:Optional
+	Time *string `json:"time,omitempty" tf:"time,omitempty"`
 }
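// An illustrative sketch, not part of the generated file: restoring a
// cluster from a backup to a point in time. The backup ID is a placeholder;
// Time is optional and defaults to the current time when unset.
func exampleRestore() MySQLClusterRestoreParameters {
	backupID := "example-backup-id" // placeholder
	at := "2024-01-02T15:04:05"     // UTC, format "2006-01-02T15:04:05"
	return MySQLClusterRestoreParameters{
		BackupID: &backupID, // json:"backupId" has no omitempty, so it is required within the block
		Time:     &at,
	}
}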
-
 type MySQLClusterUserInitParameters struct {
+	// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)
+	AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"`
-// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)
-AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"`
+	// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes.
+	ConnectionLimits []ConnectionLimitsInitParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"`
-// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes.
-ConnectionLimits []ConnectionLimitsInitParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"`
+	// List user's global permissions
+	// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS. To clear the list, use an empty list. If the attribute is not specified there will be no changes.
+	// +listType=set
+	GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"`
-// List user's global permissions
-// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes.
-// +listType=set
-GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"`
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
+	// The password of the user.
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// The password of the user.
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []MySQLClusterUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []MySQLClusterUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 type MySQLClusterUserObservation struct {
+	// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)
+	AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"`
-// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)
-AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"`
+	// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes.
+	ConnectionLimits []ConnectionLimitsObservation `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"`
-// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes.
-ConnectionLimits []ConnectionLimitsObservation `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"`
+	// List user's global permissions
+	// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS. To clear the list, use an empty list. If the attribute is not specified there will be no changes.
+	// +listType=set
+	GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"`
-// List user's global permissions
-// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes.
-// +listType=set
-GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"`
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
-// The name of the user.
-Name *string `json:"name,omitempty" tf:"name,omitempty"`
-
-// Set of permissions granted to the user. The structure is documented below.
-Permission []MySQLClusterUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []MySQLClusterUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
 }
-
 type MySQLClusterUserParameters struct {
+	// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)
+	// +kubebuilder:validation:Optional
+	AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"`
-// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)
-// +kubebuilder:validation:Optional
-AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"`
-
-// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes.
-// +kubebuilder:validation:Optional
-ConnectionLimits []ConnectionLimitsParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"`
+	// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes.
+	// +kubebuilder:validation:Optional
+	ConnectionLimits []ConnectionLimitsParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"`
-// List user's global permissions
-// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes.
-// +kubebuilder:validation:Optional
-// +listType=set
-GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"`
+	// List user's global permissions
+	// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS. To clear the list, use an empty list. If the attribute is not specified there will be no changes.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"`
-// The name of the user.
-// +kubebuilder:validation:Optional
-Name *string `json:"name" tf:"name,omitempty"`
+	// The name of the user.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
-// The password of the user.
-// +kubebuilder:validation:Optional
-PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+	// The password of the user.
+	// +kubebuilder:validation:Optional
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
-// Set of permissions granted to the user. The structure is documented below.
-// +kubebuilder:validation:Optional
-Permission []MySQLClusterUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+	// Set of permissions granted to the user. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Permission []MySQLClusterUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
 }
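// An illustrative sketch, not part of the generated file: a user whose
// password is read from a Kubernetes Secret and who is granted SELECT and
// INSERT on one database. The names and the secret reference are invented.
func exampleUser() MySQLClusterUserParameters {
	name, db := "app", "appdb"
	selectRole, insertRole := "SELECT", "INSERT"
	return MySQLClusterUserParameters{
		Name: &name, // json:"name" has no omitempty, so it is required within the block
		PasswordSecretRef: v1.SecretKeySelector{
			SecretReference: v1.SecretReference{Name: "mysql-user-app", Namespace: "default"},
			Key:             "password",
		},
		Permission: []MySQLClusterUserPermissionParameters{{
			DatabaseName: &db,
			Roles:        []*string{&selectRole, &insertRole},
		}},
	}
}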
Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } - type MySQLClusterUserPermissionParameters struct { + // The name of the database that the permission grants access to. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -// +kubebuilder:validation:Optional -DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` - -// List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. -// +kubebuilder:validation:Optional -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. + // +kubebuilder:validation:Optional + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } // MySQLClusterSpec defines the desired state of MySQLCluster type MySQLClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider MySQLClusterParameters `json:"forProvider"` + ForProvider MySQLClusterParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -886,20 +809,19 @@ type MySQLClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider MySQLClusterInitParameters `json:"initProvider,omitempty"` + InitProvider MySQLClusterInitParameters `json:"initProvider,omitempty"` } // MySQLClusterStatus defines the observed state of MySQLCluster. type MySQLClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider MySQLClusterObservation `json:"atProvider,omitempty"` + AtProvider MySQLClusterObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // MySQLCluster is the Schema for the MySQLClusters API. Manages a MySQL cluster within Yandex.Cloud. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -909,13 +831,13 @@ type MySQLClusterStatus struct { type MySQLCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resources) || (has(self.initProvider) && has(self.initProvider.resources))",message="spec.forProvider.resources is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" - Spec MySQLClusterSpec `json:"spec"` - Status MySQLClusterStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resources) || (has(self.initProvider) && has(self.initProvider.resources))",message="spec.forProvider.resources is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && 
has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec MySQLClusterSpec `json:"spec"` + Status MySQLClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mysqldatabase_terraformed.go b/apis/mdb/v1alpha1/zz_mysqldatabase_terraformed.go new file mode 100755 index 0000000..0585cad --- /dev/null +++ b/apis/mdb/v1alpha1/zz_mysqldatabase_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MySQLDatabase +func (mg *MySQLDatabase) GetTerraformResourceType() string { + return "yandex_mdb_mysql_database" +} + +// GetConnectionDetailsMapping for this MySQLDatabase +func (tr *MySQLDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MySQLDatabase +func (tr *MySQLDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MySQLDatabase +func (tr *MySQLDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MySQLDatabase +func (tr *MySQLDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MySQLDatabase +func (tr *MySQLDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MySQLDatabase +func (tr *MySQLDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MySQLDatabase +func (tr *MySQLDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MySQLDatabase +func (tr *MySQLDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
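+ // For illustration (not generated code): if params is {"name": "mydb"} and
+ // initParams is {"name": "ignored", "cluster_id": "abc"}, the merge below
+ // only adds "cluster_id"; "name" keeps its forProvider value because
+ // Overwrite is forced back to false.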
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MySQLDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MySQLDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &MySQLDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MySQLDatabase) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_mysqldatabase_types.go b/apis/mdb/v1alpha1/zz_mysqldatabase_types.go index c65e90b..cf48643 100755 --- a/apis/mdb/v1alpha1/zz_mysqldatabase_types.go +++ b/apis/mdb/v1alpha1/zz_mysqldatabase_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,67 +7,57 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type MySQLDatabaseInitParameters struct { + // +crossplane:generate:reference:type=MySQLCluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MySQLCluster -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -// Reference to a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type MySQLDatabaseObservation struct { + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` - -// The name of the database. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database.
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type MySQLDatabaseParameters struct { + // +crossplane:generate:reference:type=MySQLCluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MySQLCluster -// +kubebuilder:validation:Optional -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` - -// Reference to a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// The name of the database. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // MySQLDatabaseSpec defines the desired state of MySQLDatabase type MySQLDatabaseSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider MySQLDatabaseParameters `json:"forProvider"` + ForProvider MySQLDatabaseParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -80,20 +68,19 @@ type MySQLDatabaseSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider MySQLDatabaseInitParameters `json:"initProvider,omitempty"` + InitProvider MySQLDatabaseInitParameters `json:"initProvider,omitempty"` } // MySQLDatabaseStatus defines the observed state of MySQLDatabase. type MySQLDatabaseStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider MySQLDatabaseObservation `json:"atProvider,omitempty"` + AtProvider MySQLDatabaseObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // MySQLDatabase is the Schema for the MySQLDatabases API. Manages a MySQL database within Yandex.Cloud. 
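// A minimal sketch of the marshal/unmarshal round trip used by the
// Get*/Set* methods in the *_terraformed.go files above (standard
// encoding/json is shown here; the generated code drives upjet's
// json.TFParser the same way):
//
//	p, err := json.Marshal(tr.Spec.ForProvider) // typed spec -> JSON bytes
//	if err != nil { /* handle error */ }
//	base := map[string]any{}
//	err = json.Unmarshal(p, &base) // JSON bytes -> generic map
//
// The detour through bytes is what converts a typed spec into the
// map[string]any shape the Terraform layer consumes.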
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -103,9 +90,9 @@ type MySQLDatabaseStatus struct { type MySQLDatabase struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec MySQLDatabaseSpec `json:"spec"` - Status MySQLDatabaseStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MySQLDatabaseSpec `json:"spec"` + Status MySQLDatabaseStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mysqluser_terraformed.go b/apis/mdb/v1alpha1/zz_mysqluser_terraformed.go new file mode 100755 index 0000000..fc24b4e --- /dev/null +++ b/apis/mdb/v1alpha1/zz_mysqluser_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MySQLUser +func (mg *MySQLUser) GetTerraformResourceType() string { + return "yandex_mdb_mysql_user" +} + +// GetConnectionDetailsMapping for this MySQLUser +func (tr *MySQLUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "passwordSecretRef"} +} + +// GetObservation of this MySQLUser +func (tr *MySQLUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MySQLUser +func (tr *MySQLUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MySQLUser +func (tr *MySQLUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MySQLUser +func (tr *MySQLUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MySQLUser +func (tr *MySQLUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MySQLUser +func (tr *MySQLUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MySQLUser +func (tr *MySQLUser) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MySQLUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MySQLUser) LateInitialize(attrs []byte) (bool, error) { + params := &MySQLUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MySQLUser) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_mysqluser_types.go b/apis/mdb/v1alpha1/zz_mysqluser_types.go index e8ea63c..63e326e 100755 --- a/apis/mdb/v1alpha1/zz_mysqluser_types.go +++ b/apis/mdb/v1alpha1/zz_mysqluser_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,230 +7,209 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type MySQLUserConnectionLimitsInitParameters struct { + // Max connections per hour. + MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` -// Max connections per hour. -MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` + // Max questions per hour. + MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` -// Max questions per hour. -MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` + // Max updates per hour. + MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` -// Max updates per hour. -MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` - -// Max user connections. -MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` + // Max user connections.
+ MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` } - type MySQLUserConnectionLimitsObservation struct { + // Max connections per hour. + MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` -// Max connections per hour. -MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` - -// Max questions per hour. -MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` + // Max questions per hour. + MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` -// Max updates per hour. -MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` + // Max updates per hour. + MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` -// Max user connections. -MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` + // Max user connections. + MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` } - type MySQLUserConnectionLimitsParameters struct { + // Max connections per hour. + // +kubebuilder:validation:Optional + MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` -// Max connections per hour. -// +kubebuilder:validation:Optional -MaxConnectionsPerHour *float64 `json:"maxConnectionsPerHour,omitempty" tf:"max_connections_per_hour,omitempty"` - -// Max questions per hour. -// +kubebuilder:validation:Optional -MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` + // Max questions per hour. + // +kubebuilder:validation:Optional + MaxQuestionsPerHour *float64 `json:"maxQuestionsPerHour,omitempty" tf:"max_questions_per_hour,omitempty"` -// Max updates per hour. -// +kubebuilder:validation:Optional -MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` + // Max updates per hour. + // +kubebuilder:validation:Optional + MaxUpdatesPerHour *float64 `json:"maxUpdatesPerHour,omitempty" tf:"max_updates_per_hour,omitempty"` -// Max user connections. -// +kubebuilder:validation:Optional -MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` + // Max user connections. + // +kubebuilder:validation:Optional + MaxUserConnections *float64 `json:"maxUserConnections,omitempty" tf:"max_user_connections,omitempty"` } - type MySQLUserInitParameters struct { + // Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD) + AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"` -// Authentication plugin. 
Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD) -AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"` + // +crossplane:generate:reference:type=MySQLCluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// +crossplane:generate:reference:type=MySQLCluster -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + // Reference to a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Reference to a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Selector for a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// Selector for a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes. + ConnectionLimits []MySQLUserConnectionLimitsInitParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"` -// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes. -ConnectionLimits []MySQLUserConnectionLimitsInitParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"` + // List user's global permissions + // Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. + // +listType=set + GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"` -// List user's global permissions -// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. -// +listType=set -GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"` + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The name of the user. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The password of the user. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` -// The password of the user. -PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` - -// Set of permissions granted to the user. The structure is documented below. -Permission []MySQLUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"` + // Set of permissions granted to the user. The structure is documented below. + Permission []MySQLUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"` } - type MySQLUserObservation struct { + // Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD) + AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"` -// Authentication plugin. 
Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD) -AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"` - -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes. -ConnectionLimits []MySQLUserConnectionLimitsObservation `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"` + // User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes. + ConnectionLimits []MySQLUserConnectionLimitsObservation `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"` -// List user's global permissions -// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. -// +listType=set -GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"` + // List user's global permissions + // Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. + // +listType=set + GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// The name of the user. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Set of permissions granted to the user. The structure is documented below. -Permission []MySQLUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` + // Set of permissions granted to the user. The structure is documented below. + Permission []MySQLUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` } - type MySQLUserParameters struct { + // Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD) + // +kubebuilder:validation:Optional + AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"` -// Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD) -// +kubebuilder:validation:Optional -AuthenticationPlugin *string `json:"authenticationPlugin,omitempty" tf:"authentication_plugin,omitempty"` - -// +crossplane:generate:reference:type=MySQLCluster -// +kubebuilder:validation:Optional -ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + // +crossplane:generate:reference:type=MySQLCluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` -// Reference to a MySQLCluster to populate clusterId. -// +kubebuilder:validation:Optional -ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + // Reference to a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` -// Selector for a MySQLCluster to populate clusterId. 
-// +kubebuilder:validation:Optional -ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // Selector for a MySQLCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` -// User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes. -// +kubebuilder:validation:Optional -ConnectionLimits []MySQLUserConnectionLimitsParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"` + // User's connection limits. The structure is documented below. If the attribute is not specified there will be no changes. + // +kubebuilder:validation:Optional + ConnectionLimits []MySQLUserConnectionLimitsParameters `json:"connectionLimits,omitempty" tf:"connection_limits,omitempty"` -// List user's global permissions -// Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. -// +kubebuilder:validation:Optional -// +listType=set -GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"` + // List user's global permissions + // Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. + // +kubebuilder:validation:Optional + // +listType=set + GlobalPermissions []*string `json:"globalPermissions,omitempty" tf:"global_permissions,omitempty"` -// The name of the user. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the user. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// The password of the user. -// +kubebuilder:validation:Optional -PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + // The password of the user. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` -// Set of permissions granted to the user. The structure is documented below. -// +kubebuilder:validation:Optional -Permission []MySQLUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"` + // Set of permissions granted to the user. The structure is documented below. + // +kubebuilder:validation:Optional + Permission []MySQLUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"` } - type MySQLUserPermissionInitParameters struct { + // The name of the database that the permission grants access to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) -DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + // Reference to a MySQLDatabase in mdb to populate databaseName. 
+ // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` -// Reference to a MySQLDatabase in mdb to populate databaseName. -// +kubebuilder:validation:Optional -DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + // Selector for a MySQLDatabase in mdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` -// Selector for a MySQLDatabase in mdb to populate databaseName. -// +kubebuilder:validation:Optional -DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` - -// List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } - type MySQLUserPermissionObservation struct { + // The name of the database that the permission grants access to. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` - -// List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } - type MySQLUserPermissionParameters struct { + // The name of the database that the permission grants access to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` -// The name of the database that the permission grants access to. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.MySQLDatabase -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) -// +kubebuilder:validation:Optional -DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` - -// Reference to a MySQLDatabase in mdb to populate databaseName. -// +kubebuilder:validation:Optional -DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + // Reference to a MySQLDatabase in mdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` -// Selector for a MySQLDatabase in mdb to populate databaseName. 
-// +kubebuilder:validation:Optional -DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + // Selector for a MySQLDatabase in mdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` -// List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. -// +kubebuilder:validation:Optional -Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + // List user's roles in the database. Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE. + // +kubebuilder:validation:Optional + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` } // MySQLUserSpec defines the desired state of MySQLUser type MySQLUserSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider MySQLUserParameters `json:"forProvider"` + ForProvider MySQLUserParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -243,20 +220,19 @@ type MySQLUserSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider MySQLUserInitParameters `json:"initProvider,omitempty"` + InitProvider MySQLUserInitParameters `json:"initProvider,omitempty"` } // MySQLUserStatus defines the observed state of MySQLUser. type MySQLUserStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider MySQLUserObservation `json:"atProvider,omitempty"` + AtProvider MySQLUserObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // MySQLUser is the Schema for the MySQLUsers API. Manages a MySQL user within Yandex.Cloud. 
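// The CEL XValidation rules on MySQLUser just below (and on MySQLCluster and
// MySQLDatabase above) all encode "required unless observe-only". A plain-Go
// reading of the same predicate (the names here are illustrative only, not
// part of the API):
//
//	func violatesNameRule(policies []string, inForProvider, inInitProvider bool) bool {
//		mutating := false
//		for _, p := range policies {
//			if p == "*" || p == "Create" || p == "Update" {
//				mutating = true
//			}
//		}
//		return mutating && !inForProvider && !inInitProvider
//	}
//
// A missing name is rejected only when the management policies let the
// provider create or update the external resource.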
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -266,10 +242,10 @@ type MySQLUserStatus struct { type MySQLUser struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" - Spec MySQLUserSpec `json:"spec"` - Status MySQLUserStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" + Spec MySQLUserSpec `json:"spec"` + Status MySQLUserStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_postgresqlcluster_terraformed.go b/apis/mdb/v1alpha1/zz_postgresqlcluster_terraformed.go new file mode 100755 index 0000000..60ba50c --- /dev/null +++ b/apis/mdb/v1alpha1/zz_postgresqlcluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PostgresqlCluster +func (mg *PostgresqlCluster) GetTerraformResourceType() string { + return "yandex_mdb_postgresql_cluster" +} + +// GetConnectionDetailsMapping for this PostgresqlCluster +func (tr *PostgresqlCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"user[*].password": "user[*].passwordSecretRef"} +} + +// GetObservation of this PostgresqlCluster +func (tr *PostgresqlCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PostgresqlCluster +func (tr *PostgresqlCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PostgresqlCluster +func (tr *PostgresqlCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PostgresqlCluster +func (tr *PostgresqlCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PostgresqlCluster +func (tr *PostgresqlCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PostgresqlCluster +func (tr *PostgresqlCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PostgresqlCluster +func (tr *PostgresqlCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PostgresqlCluster using its observed tfState. +// returns True if there are any spec changes for the resource.
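+// In practice this fills only fields still nil in spec.forProvider (for
+// example, a server-side default such as disk_type_id) from the observed
+// Terraform state; values already set by the user are never overwritten, and
+// the zero-value filter below keeps empty omitempty fields from being pinned.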
+func (tr *PostgresqlCluster) LateInitialize(attrs []byte) (bool, error) { + params := &PostgresqlClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PostgresqlCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_postgresqlcluster_types.go b/apis/mdb/v1alpha1/zz_postgresqlcluster_types.go new file mode 100755 index 0000000..75074e1 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_postgresqlcluster_types.go @@ -0,0 +1,984 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigBackupWindowStartInitParameters struct { + + // The hour at which backup will be started (UTC). + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // The minute at which backup will be started (UTC). + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type ConfigBackupWindowStartObservation struct { + + // The hour at which backup will be started (UTC). + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // The minute at which backup will be started (UTC). + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type ConfigBackupWindowStartParameters struct { + + // The hour at which backup will be started (UTC). + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // The minute at which backup will be started (UTC). + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type ConfigDiskSizeAutoscalingInitParameters struct { + + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` +} + +type ConfigDiskSizeAutoscalingObservation struct { + + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` +} + +type ConfigDiskSizeAutoscalingParameters struct { + + // Limit of disk size after autoscaling (GiB). + // +kubebuilder:validation:Optional + DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` + + // Immediate autoscaling disk usage (percent). 
+ // +kubebuilder:validation:Optional + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + + // Maintenance window autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` +} + +type ConfigPerformanceDiagnosticsInitParameters struct { + + // Enable performance diagnostics + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Interval (in seconds) for pg_stat_activity sampling Acceptable values are 1 to 86400, inclusive. + SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval,omitempty" tf:"sessions_sampling_interval,omitempty"` + + // Interval (in seconds) for pg_stat_statements sampling Acceptable values are 1 to 86400, inclusive. + StatementsSamplingInterval *float64 `json:"statementsSamplingInterval,omitempty" tf:"statements_sampling_interval,omitempty"` +} + +type ConfigPerformanceDiagnosticsObservation struct { + + // Enable performance diagnostics + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Interval (in seconds) for pg_stat_activity sampling Acceptable values are 1 to 86400, inclusive. + SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval,omitempty" tf:"sessions_sampling_interval,omitempty"` + + // Interval (in seconds) for pg_stat_statements sampling Acceptable values are 1 to 86400, inclusive. + StatementsSamplingInterval *float64 `json:"statementsSamplingInterval,omitempty" tf:"statements_sampling_interval,omitempty"` +} + +type ConfigPerformanceDiagnosticsParameters struct { + + // Enable performance diagnostics + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Interval (in seconds) for pg_stat_activity sampling Acceptable values are 1 to 86400, inclusive. + // +kubebuilder:validation:Optional + SessionsSamplingInterval *float64 `json:"sessionsSamplingInterval" tf:"sessions_sampling_interval,omitempty"` + + // Interval (in seconds) for pg_stat_statements sampling Acceptable values are 1 to 86400, inclusive. + // +kubebuilder:validation:Optional + StatementsSamplingInterval *float64 `json:"statementsSamplingInterval" tf:"statements_sampling_interval,omitempty"` +} + +type ConfigPoolerConfigInitParameters struct { + + // Setting pool_discard parameter in Odyssey. + PoolDiscard *bool `json:"poolDiscard,omitempty" tf:"pool_discard,omitempty"` + + // Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string. + PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` +} + +type ConfigPoolerConfigObservation struct { + + // Setting pool_discard parameter in Odyssey. + PoolDiscard *bool `json:"poolDiscard,omitempty" tf:"pool_discard,omitempty"` + + // Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string. + PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` +} + +type ConfigPoolerConfigParameters struct { + + // Setting pool_discard parameter in Odyssey. + // +kubebuilder:validation:Optional + PoolDiscard *bool `json:"poolDiscard,omitempty" tf:"pool_discard,omitempty"` + + // Mode that the connection pooler is working in. 
See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string. + // +kubebuilder:validation:Optional + PoolingMode *string `json:"poolingMode,omitempty" tf:"pooling_mode,omitempty"` +} + +type ConfigResourcesInitParameters struct { + + // Volume of the storage available to a PostgreSQL host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Type of the storage of PostgreSQL hosts. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` +} + +type ConfigResourcesObservation struct { + + // Volume of the storage available to a PostgreSQL host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Type of the storage of PostgreSQL hosts. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` +} + +type ConfigResourcesParameters struct { + + // Volume of the storage available to a PostgreSQL host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` + + // Type of the storage of PostgreSQL hosts. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` +} + +type ExtensionInitParameters struct { + + // Name of the PostgreSQL cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version of the PostgreSQL cluster. (allowed versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, 14, 14-1c, 15, 15-1c, 16) + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ExtensionObservation struct { + + // Name of the PostgreSQL cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version of the PostgreSQL cluster. (allowed versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, 14, 14-1c, 15, 15-1c, 16) + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ExtensionParameters struct { + + // Name of the PostgreSQL cluster. Provided by the client when the cluster is created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Version of the PostgreSQL cluster. (allowed versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, 14, 14-1c, 15, 15-1c, 16) + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PostgresqlClusterConfigAccessInitParameters struct { + + // Allow access for Yandex DataLens. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + + // Allow access for connection to managed databases from functions + Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` + + // Allow access for SQL queries in the management console + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` +} + +type PostgresqlClusterConfigAccessObservation struct { + + // Allow access for Yandex DataLens. 
+ DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + + // Allow access for DataTransfer + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + + // Allow access for connection to managed databases from functions + Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` + + // Allow access for SQL queries in the management console + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` +} + +type PostgresqlClusterConfigAccessParameters struct { + + // Allow access for Yandex DataLens. + // +kubebuilder:validation:Optional + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + + // Allow access for DataTransfer + // +kubebuilder:validation:Optional + DataTransfer *bool `json:"dataTransfer,omitempty" tf:"data_transfer,omitempty"` + + // Allow access for connection to managed databases from functions + // +kubebuilder:validation:Optional + Serverless *bool `json:"serverless,omitempty" tf:"serverless,omitempty"` + + // Allow access for SQL queries in the management console + // +kubebuilder:validation:Optional + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` +} + +type PostgresqlClusterConfigInitParameters struct { + + // Access policy to the PostgreSQL cluster. The structure is documented below. + Access []PostgresqlClusterConfigAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"` + + // Configuration setting which enables/disables autofailover in cluster. + Autofailover *bool `json:"autofailover,omitempty" tf:"autofailover,omitempty"` + + // The period in days during which backups are stored. + BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` + + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + BackupWindowStart []ConfigBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + + // Cluster disk size autoscaling settings. The structure is documented below. + DiskSizeAutoscaling []ConfigDiskSizeAutoscalingInitParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"` + + // Cluster performance diagnostics settings. The structure is documented below. YC Documentation + PerformanceDiagnostics []ConfigPerformanceDiagnosticsInitParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"` + + // Configuration of the connection pooler. The structure is documented below. + PoolerConfig []ConfigPoolerConfigInitParameters `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` + + // PostgreSQL cluster config. Detail info in "postresql config" section (documented below). + // +mapType=granular + PostgresqlConfig map[string]*string `json:"postgresqlConfig,omitempty" tf:"postgresql_config,omitempty"` + + // Resources allocated to hosts of the PostgreSQL cluster. The structure is documented below. + Resources []ConfigResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // Version of the PostgreSQL cluster. (allowed versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, 14, 14-1c, 15, 15-1c, 16) + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PostgresqlClusterConfigObservation struct { + + // Access policy to the PostgreSQL cluster. The structure is documented below. 
+ Access []PostgresqlClusterConfigAccessObservation `json:"access,omitempty" tf:"access,omitempty"` + + // Configuration setting which enables/disables autofailover in cluster. + Autofailover *bool `json:"autofailover,omitempty" tf:"autofailover,omitempty"` + + // The period in days during which backups are stored. + BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` + + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + BackupWindowStart []ConfigBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + + // Cluster disk size autoscaling settings. The structure is documented below. + DiskSizeAutoscaling []ConfigDiskSizeAutoscalingObservation `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"` + + // Cluster performance diagnostics settings. The structure is documented below. YC Documentation + PerformanceDiagnostics []ConfigPerformanceDiagnosticsObservation `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"` + + // Configuration of the connection pooler. The structure is documented below. + PoolerConfig []ConfigPoolerConfigObservation `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` + + // PostgreSQL cluster config. Detailed info in the "postgresql config" section (documented below). + // +mapType=granular + PostgresqlConfig map[string]*string `json:"postgresqlConfig,omitempty" tf:"postgresql_config,omitempty"` + + // Resources allocated to hosts of the PostgreSQL cluster. The structure is documented below. + Resources []ConfigResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + + // Version of the PostgreSQL cluster. (allowed versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, 14, 14-1c, 15, 15-1c, 16) + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PostgresqlClusterConfigParameters struct { + + // Access policy to the PostgreSQL cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Access []PostgresqlClusterConfigAccessParameters `json:"access,omitempty" tf:"access,omitempty"` + + // Configuration setting which enables/disables autofailover in cluster. + // +kubebuilder:validation:Optional + Autofailover *bool `json:"autofailover,omitempty" tf:"autofailover,omitempty"` + + // The period in days during which backups are stored. + // +kubebuilder:validation:Optional + BackupRetainPeriodDays *float64 `json:"backupRetainPeriodDays,omitempty" tf:"backup_retain_period_days,omitempty"` + + // Time to start the daily backup, in the UTC timezone. The structure is documented below. + // +kubebuilder:validation:Optional + BackupWindowStart []ConfigBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"` + + // Cluster disk size autoscaling settings. The structure is documented below. + // +kubebuilder:validation:Optional + DiskSizeAutoscaling []ConfigDiskSizeAutoscalingParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"` + + // Cluster performance diagnostics settings. The structure is documented below. YC Documentation + // +kubebuilder:validation:Optional + PerformanceDiagnostics []ConfigPerformanceDiagnosticsParameters `json:"performanceDiagnostics,omitempty" tf:"performance_diagnostics,omitempty"` + + // Configuration of the connection pooler. The structure is documented below.
+ // +kubebuilder:validation:Optional + PoolerConfig []ConfigPoolerConfigParameters `json:"poolerConfig,omitempty" tf:"pooler_config,omitempty"` + + // PostgreSQL cluster config. Detailed info in the "postgresql config" section (documented below). + // +kubebuilder:validation:Optional + // +mapType=granular + PostgresqlConfig map[string]*string `json:"postgresqlConfig,omitempty" tf:"postgresql_config,omitempty"` + + // Resources allocated to hosts of the PostgreSQL cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []ConfigResourcesParameters `json:"resources" tf:"resources,omitempty"` + + // Version of the PostgreSQL cluster. (allowed versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, 14, 14-1c, 15, 15-1c, 16) + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type PostgresqlClusterDatabaseInitParameters struct { + Extension []ExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + LcCollate *string `json:"lcCollate,omitempty" tf:"lc_collate,omitempty"` + + LcType *string `json:"lcType,omitempty" tf:"lc_type,omitempty"` + + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + TemplateDB *string `json:"templateDb,omitempty" tf:"template_db,omitempty"` +} + +type PostgresqlClusterDatabaseObservation struct { + Extension []ExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + LcCollate *string `json:"lcCollate,omitempty" tf:"lc_collate,omitempty"` + + LcType *string `json:"lcType,omitempty" tf:"lc_type,omitempty"` + + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + TemplateDB *string `json:"templateDb,omitempty" tf:"template_db,omitempty"` +} + +type PostgresqlClusterDatabaseParameters struct { + + // +kubebuilder:validation:Optional + Extension []ExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // +kubebuilder:validation:Optional + LcCollate *string `json:"lcCollate,omitempty" tf:"lc_collate,omitempty"` + + // +kubebuilder:validation:Optional + LcType *string `json:"lcType,omitempty" tf:"lc_type,omitempty"` + + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + Owner *string `json:"owner" tf:"owner,omitempty"` + + // +kubebuilder:validation:Optional + TemplateDB *string `json:"templateDb,omitempty" tf:"template_db,omitempty"` +} + +type PostgresqlClusterHostInitParameters struct { + + // Sets whether the host should get a public IP address on creation. It can be changed on the fly only when name is set. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host, to select which host will be its replication source. Please see replication_source_name parameter. Also, this field is used to select which host will be selected as a master host. Please see host_master_name parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Host priority in HA group.
It works only when name is set. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. + ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` + + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The availability zone where the PostgreSQL host will be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type PostgresqlClusterHostObservation struct { + + // Sets whether the host should get a public IP address on creation. It can be changed on the fly only when name is set. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // (Computed) The fully qualified domain name of the host. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host, to select which host will be its replication source. Please see replication_source_name parameter. Also, this field is used to select which host will be selected as a master host. Please see host_master_name parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Host priority in HA group. It works only when name is set. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // (Computed) Host replication source (fqdn), when replication_source is empty then host is in HA group. + ReplicationSource *string `json:"replicationSource,omitempty" tf:"replication_source,omitempty"` + + // Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. + ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` + + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The availability zone where the PostgreSQL host will be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type PostgresqlClusterHostParameters struct { + + // Sets whether the host should get a public IP address on creation. It can be changed on the fly only when name is set. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Host state name. It should be set for all hosts or unset for all hosts. This field can be used by another host, to select which host will be its replication source. Please see replication_source_name parameter. Also, this field is used to select which host will be selected as a master host. 
Please see host_master_name parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Host priority in HA group. It works only when name is set. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Host replication source name points to host's name from which this host should replicate. When not set then host in HA group. It works only when name is set. + // +kubebuilder:validation:Optional + ReplicationSourceName *string `json:"replicationSourceName,omitempty" tf:"replication_source_name,omitempty"` + + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The availability zone where the PostgreSQL host will be created. + // +kubebuilder:validation:Optional + Zone *string `json:"zone" tf:"zone,omitempty"` +} + +type PostgresqlClusterInitParameters struct { + + // Configuration of the PostgreSQL cluster. The structure is documented below. + Config []PostgresqlClusterConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` + + // (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_postgresql_database. + Database []PostgresqlClusterDatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the PostgreSQL cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Deployment environment of the PostgreSQL cluster. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is unset, the default provider folder_id is used for create. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A host of the PostgreSQL cluster. The structure is documented below. + Host []PostgresqlClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"` + + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // It sets name of master host. It works only when host.name is set. + HostMasterName *string `json:"hostMasterName,omitempty" tf:"host_master_name,omitempty"` + + // A set of key/value label pairs to assign to the PostgreSQL cluster. 
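
The subnetId plumbing above follows the usual crossplane-runtime pattern: set the ID directly, point SubnetIDRef at a named Subnet object, or let SubnetIDSelector pick one by labels. A minimal sketch of the latter two styles, using the v1 alias these files already import; the zone, subnet name, and labels are hypothetical, and in practice only one of the two is set:

// Illustrative sketch, not generated code.
func exampleHostWiring() (byRef, bySelector PostgresqlClusterHostParameters) {
	strPtr := func(s string) *string { return &s }
	byRef = PostgresqlClusterHostParameters{
		Zone: strPtr("ru-central1-a"), // hypothetical zone
		// Resolve subnetId from a Subnet managed resource by name.
		SubnetIDRef: &v1.Reference{Name: "my-subnet"}, // hypothetical name
	}
	bySelector = PostgresqlClusterHostParameters{
		Zone: strPtr("ru-central1-a"),
		// Or resolve it from whichever Subnet carries matching labels.
		SubnetIDSelector: &v1.Selector{MatchLabels: map[string]string{"team": "db"}},
	}
	return byRef, bySelector
}
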
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Maintenance policy of the PostgreSQL cluster. The structure is documented below. + MaintenanceWindow []PostgresqlClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the PostgreSQL cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the PostgreSQL cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // The cluster will be created from the specified backup. The structure is documented below. + Restore []PostgresqlClusterRestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"` + + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + + // (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_postgresql_user. + User []PostgresqlClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type PostgresqlClusterMaintenanceWindowInitParameters struct { + + // Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Hour of the day in UTC (in HH format). Allowed value is between 1 and 24. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PostgresqlClusterMaintenanceWindowObservation struct { + + // Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Hour of the day in UTC (in HH format). Allowed value is between 1 and 24. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PostgresqlClusterMaintenanceWindowParameters struct { + + // Day of the week (in DDD format). 
Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN" + // +kubebuilder:validation:Optional + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Hour of the day in UTC (in HH format). Allowed value is between 1 and 24. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PostgresqlClusterObservation struct { + + // Configuration of the PostgreSQL cluster. The structure is documented below. + Config []PostgresqlClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"` + + // Timestamp of cluster creation. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_postgresql_database. + Database []PostgresqlClusterDatabaseObservation `json:"database,omitempty" tf:"database,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the PostgreSQL cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Deployment environment of the PostgreSQL cluster. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is unset, the default provider folder_id is used for create. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Aggregated health of the cluster. + Health *string `json:"health,omitempty" tf:"health,omitempty"` + + // A host of the PostgreSQL cluster. The structure is documented below. + Host []PostgresqlClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"` + + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // It sets name of master host. It works only when host.name is set. + HostMasterName *string `json:"hostMasterName,omitempty" tf:"host_master_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the PostgreSQL cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Maintenance policy of the PostgreSQL cluster. The structure is documented below. + MaintenanceWindow []PostgresqlClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the PostgreSQL cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the PostgreSQL cluster belongs. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // The cluster will be created from the specified backup. The structure is documented below. + Restore []PostgresqlClusterRestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"` + + // A set of ids of security groups assigned to hosts of the cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Status of the cluster. 
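
The maintenance_window comments above amount to a small rule set: ANYTIME windows take no day/hour, WEEKLY windows need both, and hour runs from 1 to 24. One reading of those documented constraints as plain Go (an illustrative helper, not shipped by the provider; assumes the standard library errors package):

// Illustrative sketch, not generated code.
func validateMaintenanceWindow(mw PostgresqlClusterMaintenanceWindowParameters) error {
	if mw.Type == nil {
		return errors.New("type must be ANYTIME or WEEKLY")
	}
	switch *mw.Type {
	case "ANYTIME":
		return nil
	case "WEEKLY":
		// The docs say a day and hour need to be specified with a weekly window.
		if mw.Day == nil || mw.Hour == nil {
			return errors.New("a WEEKLY window requires both day and hour")
		}
		if *mw.Hour < 1 || *mw.Hour > 24 {
			return errors.New("hour must be between 1 and 24")
		}
		return nil
	default:
		return errors.New("type must be ANYTIME or WEEKLY")
	}
}
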
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_postgresql_user. + User []PostgresqlClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"` +} + +type PostgresqlClusterParameters struct { + + // Configuration of the PostgreSQL cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Config []PostgresqlClusterConfigParameters `json:"config,omitempty" tf:"config,omitempty"` + + // (Deprecated) To manage databases, please switch to using a separate resource type yandex_mdb_postgresql_database. + // +kubebuilder:validation:Optional + Database []PostgresqlClusterDatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the PostgreSQL cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Deployment environment of the PostgreSQL cluster. + // +kubebuilder:validation:Optional + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is unset, the default provider folder_id is used for create. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A host of the PostgreSQL cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Host []PostgresqlClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // It sets name of master host. It works only when host.name is set. + // +kubebuilder:validation:Optional + HostMasterName *string `json:"hostMasterName,omitempty" tf:"host_master_name,omitempty"` + + // A set of key/value label pairs to assign to the PostgreSQL cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Maintenance policy of the PostgreSQL cluster. The structure is documented below. + // +kubebuilder:validation:Optional + MaintenanceWindow []PostgresqlClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the PostgreSQL cluster. Provided by the client when the cluster is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the PostgreSQL cluster belongs. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // The cluster will be created from the specified backup. The structure is documented below. + // +kubebuilder:validation:Optional + Restore []PostgresqlClusterRestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"` + + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + + // (Deprecated) To manage users, please switch to using a separate resource type yandex_mdb_postgresql_user. + // +kubebuilder:validation:Optional + User []PostgresqlClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type PostgresqlClusterRestoreInitParameters struct { + + // Backup ID. The cluster will be created from the specified backup. How to get a list of PostgreSQL backups. + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // Timestamp of the moment to which the PostgreSQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // Flag that indicates whether a database should be restored to the first backup point available just after the timestamp specified in the [time] field instead of just before. + // Possible values: + TimeInclusive *bool `json:"timeInclusive,omitempty" tf:"time_inclusive,omitempty"` +} + +type PostgresqlClusterRestoreObservation struct { + + // Backup ID. The cluster will be created from the specified backup. How to get a list of PostgreSQL backups. + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // Timestamp of the moment to which the PostgreSQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // Flag that indicates whether a database should be restored to the first backup point available just after the timestamp specified in the [time] field instead of just before. + // Possible values: + TimeInclusive *bool `json:"timeInclusive,omitempty" tf:"time_inclusive,omitempty"` +} + +type PostgresqlClusterRestoreParameters struct { + + // Backup ID. The cluster will be created from the specified backup. How to get a list of PostgreSQL backups. 
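
A handy detail of the restore block above: the documented timestamp format, "2006-01-02T15:04:05", is exactly Go's reference-time layout, so it can be handed to the time package verbatim. A small sketch (the sample timestamp is hypothetical):

// Illustrative sketch, not generated code; requires the standard "time" import.
const restoreTimeLayout = "2006-01-02T15:04:05" // documented as UTC

func parseRestoreTime(s string) (time.Time, error) {
	return time.ParseInLocation(restoreTimeLayout, s, time.UTC)
}

// e.g. parseRestoreTime("2024-05-01T12:00:00")
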
+ // +kubebuilder:validation:Optional + BackupID *string `json:"backupId" tf:"backup_id,omitempty"` + + // Timestamp of the moment to which the PostgreSQL cluster should be restored. (Format: "2006-01-02T15:04:05" - UTC). When not set, current time is used. + // +kubebuilder:validation:Optional + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // Flag that indicates whether a database should be restored to the first backup point available just after the timestamp specified in the [time] field instead of just before. + // Possible values: + // +kubebuilder:validation:Optional + TimeInclusive *bool `json:"timeInclusive,omitempty" tf:"time_inclusive,omitempty"` +} + +type PostgresqlClusterUserInitParameters struct { + ConnLimit *float64 `json:"connLimit,omitempty" tf:"conn_limit,omitempty"` + + Grants []*string `json:"grants,omitempty" tf:"grants,omitempty"` + + Login *bool `json:"login,omitempty" tf:"login,omitempty"` + + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + Permission []PostgresqlClusterUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"` + + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type PostgresqlClusterUserObservation struct { + ConnLimit *float64 `json:"connLimit,omitempty" tf:"conn_limit,omitempty"` + + Grants []*string `json:"grants,omitempty" tf:"grants,omitempty"` + + Login *bool `json:"login,omitempty" tf:"login,omitempty"` + + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Permission []PostgresqlClusterUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` + + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type PostgresqlClusterUserParameters struct { + + // +kubebuilder:validation:Optional + ConnLimit *float64 `json:"connLimit,omitempty" tf:"conn_limit,omitempty"` + + // +kubebuilder:validation:Optional + Grants []*string `json:"grants,omitempty" tf:"grants,omitempty"` + + // +kubebuilder:validation:Optional + Login *bool `json:"login,omitempty" tf:"login,omitempty"` + + // The name of the user. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // +kubebuilder:validation:Optional + Permission []PostgresqlClusterUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"` + + // +kubebuilder:validation:Optional + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type PostgresqlClusterUserPermissionInitParameters struct { + + // The name of the database that the permission grants access to. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` +} + +type PostgresqlClusterUserPermissionObservation struct { + + // The name of the database that the permission grants access to.
+ DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` +} + +type PostgresqlClusterUserPermissionParameters struct { + + // The name of the database that the permission grants access to. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` +} + +// PostgresqlClusterSpec defines the desired state of PostgresqlCluster +type PostgresqlClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PostgresqlClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider PostgresqlClusterInitParameters `json:"initProvider,omitempty"` +} + +// PostgresqlClusterStatus defines the observed state of PostgresqlCluster. +type PostgresqlClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PostgresqlClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// PostgresqlCluster is the Schema for the PostgresqlClusters API. Manages a PostgreSQL cluster within Yandex.Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type PostgresqlCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) ||
(has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec PostgresqlClusterSpec `json:"spec"` + Status PostgresqlClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresqlClusterList contains a list of PostgresqlClusters +type PostgresqlClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresqlCluster `json:"items"` +} + +// Repository type metadata. +var ( + PostgresqlCluster_Kind = "PostgresqlCluster" + PostgresqlCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PostgresqlCluster_Kind}.String() + PostgresqlCluster_KindAPIVersion = PostgresqlCluster_Kind + "." + CRDGroupVersion.String() + PostgresqlCluster_GroupVersionKind = CRDGroupVersion.WithKind(PostgresqlCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&PostgresqlCluster{}, &PostgresqlClusterList{}) +} diff --git a/apis/mdb/v1alpha1/zz_postgresqldatabase_terraformed.go b/apis/mdb/v1alpha1/zz_postgresqldatabase_terraformed.go new file mode 100755 index 0000000..0f47d39 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_postgresqldatabase_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PostgresqlDatabase +func (mg *PostgresqlDatabase) GetTerraformResourceType() string { + return "yandex_mdb_postgresql_database" +} + +// GetConnectionDetailsMapping for this PostgresqlDatabase +func (tr *PostgresqlDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PostgresqlDatabase +func (tr *PostgresqlDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PostgresqlDatabase +func (tr *PostgresqlDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PostgresqlDatabase +func (tr *PostgresqlDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PostgresqlDatabase +func (tr *PostgresqlDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PostgresqlDatabase +func (tr *PostgresqlDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PostgresqlDatabase +func (tr *PostgresqlDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this PostgresqlDatabase +func (tr *PostgresqlDatabase)
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PostgresqlDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *PostgresqlDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &PostgresqlDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PostgresqlDatabase) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_postgresqldatabase_types.go b/apis/mdb/v1alpha1/zz_postgresqldatabase_types.go new file mode 100755 index 0000000..6f6288a --- /dev/null +++ b/apis/mdb/v1alpha1/zz_postgresqldatabase_types.go @@ -0,0 +1,226 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PostgresqlDatabaseExtensionInitParameters struct { + + // Name of the database extension. For more information on available extensions see the official documentation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version of the extension. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PostgresqlDatabaseExtensionObservation struct { + + // Name of the database extension. For more information on available extensions see the official documentation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version of the extension. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PostgresqlDatabaseExtensionParameters struct { + + // Name of the database extension. For more information on available extensions see the official documentation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Version of the extension.
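
The Note(lsviben) comment in GetMergedParameters above is worth unpacking: mergo.WithSliceDeepCopy implicitly turns Overwrite on, so the extra option switches it back off, which means initProvider values only fill fields that forProvider left empty. A standalone sketch of that behavior; the map keys and values are hypothetical:

// Illustrative sketch, not generated code.
package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	forProvider := map[string]any{"name": "mydb"}
	initProvider := map[string]any{"name": "ignored", "owner": "alice"}
	// Same option combination as GetMergedParameters: deep-copy slices,
	// but never overwrite values forProvider already carries.
	_ = mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	fmt.Println(forProvider) // map[name:mydb owner:alice]
}
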
+ // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PostgresqlDatabaseInitParameters struct { + + // +crossplane:generate:reference:type=PostgresqlCluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Reference to a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + + // Selector for a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + + // Inhibits deletion of the database. Can either be true, false or unspecified. + DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Set of database extensions. The structure is documented below + Extension []PostgresqlDatabaseExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // POSIX locale for string sorting order. Forbidden to change in an existing database. + LcCollate *string `json:"lcCollate,omitempty" tf:"lc_collate,omitempty"` + + // POSIX locale for character classification. Forbidden to change in an existing database. + LcType *string `json:"lcType,omitempty" tf:"lc_type,omitempty"` + + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Name of the user assigned as the owner of the database. Forbidden to change in an existing database. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Reference to a PostgresqlUser in mdb to populate owner. + // +kubebuilder:validation:Optional + OwnerRef *v1.Reference `json:"ownerRef,omitempty" tf:"-"` + + // Selector for a PostgresqlUser in mdb to populate owner. + // +kubebuilder:validation:Optional + OwnerSelector *v1.Selector `json:"ownerSelector,omitempty" tf:"-"` + + // Name of the template database. + TemplateDB *string `json:"templateDb,omitempty" tf:"template_db,omitempty"` +} + +type PostgresqlDatabaseObservation struct { + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Inhibits deletion of the database. Can either be true, false or unspecified. + DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Set of database extensions. The structure is documented below + Extension []PostgresqlDatabaseExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // POSIX locale for string sorting order. Forbidden to change in an existing database. + LcCollate *string `json:"lcCollate,omitempty" tf:"lc_collate,omitempty"` + + // POSIX locale for character classification. Forbidden to change in an existing database. + LcType *string `json:"lcType,omitempty" tf:"lc_type,omitempty"` + + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Name of the user assigned as the owner of the database. Forbidden to change in an existing database. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Name of the template database. 
+ TemplateDB *string `json:"templateDb,omitempty" tf:"template_db,omitempty"` +} + +type PostgresqlDatabaseParameters struct { + + // +crossplane:generate:reference:type=PostgresqlCluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Reference to a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + + // Selector for a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + + // Inhibits deletion of the database. Can either be true, false or unspecified. + // +kubebuilder:validation:Optional + DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Set of database extensions. The structure is documented below + // +kubebuilder:validation:Optional + Extension []PostgresqlDatabaseExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // POSIX locale for string sorting order. Forbidden to change in an existing database. + // +kubebuilder:validation:Optional + LcCollate *string `json:"lcCollate,omitempty" tf:"lc_collate,omitempty"` + + // POSIX locale for character classification. Forbidden to change in an existing database. + // +kubebuilder:validation:Optional + LcType *string `json:"lcType,omitempty" tf:"lc_type,omitempty"` + + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Name of the user assigned as the owner of the database. Forbidden to change in an existing database. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1.PostgresqlUser + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Reference to a PostgresqlUser in mdb to populate owner. + // +kubebuilder:validation:Optional + OwnerRef *v1.Reference `json:"ownerRef,omitempty" tf:"-"` + + // Selector for a PostgresqlUser in mdb to populate owner. + // +kubebuilder:validation:Optional + OwnerSelector *v1.Selector `json:"ownerSelector,omitempty" tf:"-"` + + // Name of the template database. + // +kubebuilder:validation:Optional + TemplateDB *string `json:"templateDb,omitempty" tf:"template_db,omitempty"` +} + +// PostgresqlDatabaseSpec defines the desired state of PostgresqlDatabase +type PostgresqlDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PostgresqlDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler.
+ InitProvider PostgresqlDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// PostgresqlDatabaseStatus defines the observed state of PostgresqlDatabase. +type PostgresqlDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PostgresqlDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// PostgresqlDatabase is the Schema for the PostgresqlDatabases API. Manages a PostgreSQL database within Yandex.Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type PostgresqlDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec PostgresqlDatabaseSpec `json:"spec"` + Status PostgresqlDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresqlDatabaseList contains a list of PostgresqlDatabases +type PostgresqlDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresqlDatabase `json:"items"` +} + +// Repository type metadata. +var ( + PostgresqlDatabase_Kind = "PostgresqlDatabase" + PostgresqlDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PostgresqlDatabase_Kind}.String() + PostgresqlDatabase_KindAPIVersion = PostgresqlDatabase_Kind + "." + CRDGroupVersion.String() + PostgresqlDatabase_GroupVersionKind = CRDGroupVersion.WithKind(PostgresqlDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&PostgresqlDatabase{}, &PostgresqlDatabaseList{}) +} diff --git a/apis/mdb/v1alpha1/zz_postgresqluser_terraformed.go b/apis/mdb/v1alpha1/zz_postgresqluser_terraformed.go new file mode 100755 index 0000000..223fb0f --- /dev/null +++ b/apis/mdb/v1alpha1/zz_postgresqluser_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PostgresqlUser +func (mg *PostgresqlUser) GetTerraformResourceType() string { + return "yandex_mdb_postgresql_user" +} + +// GetConnectionDetailsMapping for this PostgresqlUser +func (tr *PostgresqlUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "passwordSecretRef"} +} + +// GetObservation of this PostgresqlUser +func (tr *PostgresqlUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PostgresqlUser +func (tr *PostgresqlUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PostgresqlUser +func (tr *PostgresqlUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PostgresqlUser +func (tr *PostgresqlUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PostgresqlUser +func (tr *PostgresqlUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PostgresqlUser +func (tr *PostgresqlUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this PostgresqlUser +func (tr *PostgresqlUser) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PostgresqlUser using its observed tfState. + // returns True if there are any spec changes for the resource.
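
GetConnectionDetailsMapping above ties the Terraform "password" attribute to passwordSecretRef, so the user's password never sits inline in the spec: it is read from a Kubernetes Secret and surfaced back through connection details. A sketch of populating that selector with the v1 alias used by these files; the Secret name, namespace, and key are hypothetical:

// Illustrative sketch, not generated code.
func examplePasswordRef() v1.SecretKeySelector {
	return v1.SecretKeySelector{
		SecretReference: v1.SecretReference{
			Name:      "pg-user-credentials", // hypothetical Secret name
			Namespace: "crossplane-system",   // hypothetical namespace
		},
		Key: "password", // key inside the Secret's data map
	}
}
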
+func (tr *PostgresqlUser) LateInitialize(attrs []byte) (bool, error) { + params := &PostgresqlUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PostgresqlUser) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_postgresqluser_types.go b/apis/mdb/v1alpha1/zz_postgresqluser_types.go new file mode 100755 index 0000000..393439a --- /dev/null +++ b/apis/mdb/v1alpha1/zz_postgresqluser_types.go @@ -0,0 +1,207 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PostgresqlUserInitParameters struct { + + // +crossplane:generate:reference:type=PostgresqlCluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Reference to a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + + // Selector for a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + + // The maximum number of connections per user. (Default 50) + ConnLimit *float64 `json:"connLimit,omitempty" tf:"conn_limit,omitempty"` + + // Inhibits deletion of the user. Can either be true, false or unspecified. + DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // List of the user's grants. + Grants []*string `json:"grants,omitempty" tf:"grants,omitempty"` + + // User's ability to login. + Login *bool `json:"login,omitempty" tf:"login,omitempty"` + + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password of the user. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Set of permissions granted to the user. The structure is documented below. + Permission []PostgresqlUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"` + + // Map of user settings. List of settings is documented below. + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type PostgresqlUserObservation struct { + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // The maximum number of connections per user. (Default 50) + ConnLimit *float64 `json:"connLimit,omitempty" tf:"conn_limit,omitempty"` + + // Inhibits deletion of the user. Can either be true, false or unspecified. + DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // List of the user's grants. + Grants []*string `json:"grants,omitempty" tf:"grants,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // User's ability to login. + Login *bool `json:"login,omitempty" tf:"login,omitempty"` + + // The name of the user. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Set of permissions granted to the user. The structure is documented below. + Permission []PostgresqlUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` + + // Map of user settings. List of settings is documented below. + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type PostgresqlUserParameters struct { + + // +crossplane:generate:reference:type=PostgresqlCluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Reference to a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDRef *v1.Reference `json:"clusterIdRef,omitempty" tf:"-"` + + // Selector for a PostgresqlCluster to populate clusterId. + // +kubebuilder:validation:Optional + ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + + // The maximum number of connections per user. (Default 50) + // +kubebuilder:validation:Optional + ConnLimit *float64 `json:"connLimit,omitempty" tf:"conn_limit,omitempty"` + + // Inhibits deletion of the user. Can either be true, false or unspecified. + // +kubebuilder:validation:Optional + DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // List of the user's grants. + // +kubebuilder:validation:Optional + Grants []*string `json:"grants,omitempty" tf:"grants,omitempty"` + + // User's ability to login. + // +kubebuilder:validation:Optional + Login *bool `json:"login,omitempty" tf:"login,omitempty"` + + // The name of the user. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password of the user. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Set of permissions granted to the user. The structure is documented below. + // +kubebuilder:validation:Optional + Permission []PostgresqlUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"` + + // Map of user settings. List of settings is documented below. + // +kubebuilder:validation:Optional + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type PostgresqlUserPermissionInitParameters struct { + + // The name of the database that the permission grants access to. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` +} + +type PostgresqlUserPermissionObservation struct { + + // The name of the database that the permission grants access to. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` +} + +type PostgresqlUserPermissionParameters struct { + + // The name of the database that the permission grants access to. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` +} + +// PostgresqlUserSpec defines the desired state of PostgresqlUser +type PostgresqlUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PostgresqlUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider PostgresqlUserInitParameters `json:"initProvider,omitempty"`
+}
+
+// PostgresqlUserStatus defines the observed state of PostgresqlUser.
+type PostgresqlUserStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        PostgresqlUserObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// PostgresqlUser is the Schema for the PostgresqlUsers API. Manages a PostgreSQL user within Yandex.Cloud.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type PostgresqlUser struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter"
+	Spec   PostgresqlUserSpec   `json:"spec"`
+	Status PostgresqlUserStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PostgresqlUserList contains a list of PostgresqlUsers
+type PostgresqlUserList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []PostgresqlUser `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	PostgresqlUser_Kind             = "PostgresqlUser"
+	PostgresqlUser_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: PostgresqlUser_Kind}.String()
+	PostgresqlUser_KindAPIVersion   = PostgresqlUser_Kind + "." + CRDGroupVersion.String()
+	PostgresqlUser_GroupVersionKind = CRDGroupVersion.WithKind(PostgresqlUser_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&PostgresqlUser{}, &PostgresqlUserList{})
+}
diff --git a/apis/mdb/v1alpha1/zz_rediscluster_terraformed.go b/apis/mdb/v1alpha1/zz_rediscluster_terraformed.go
new file mode 100755
index 0000000..e55015f
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_rediscluster_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
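+// The methods in this file are what wire RedisCluster into upjet's
+// resource.Terraformed machinery. A compile-time assertion such as the
+// following would make that explicit (illustrative sketch only; the
+// generator does not emit it):
+//
+//	var _ resource.Terraformed = &RedisCluster{}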
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this RedisCluster
+func (mg *RedisCluster) GetTerraformResourceType() string {
+	return "yandex_mdb_redis_cluster"
+}
+
+// GetConnectionDetailsMapping for this RedisCluster
+func (tr *RedisCluster) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"config[*].password": "config[*].passwordSecretRef"}
+}
+
+// GetObservation of this RedisCluster
+func (tr *RedisCluster) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this RedisCluster
+func (tr *RedisCluster) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this RedisCluster
+func (tr *RedisCluster) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this RedisCluster
+func (tr *RedisCluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this RedisCluster
+func (tr *RedisCluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this RedisCluster
+func (tr *RedisCluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this RedisCluster
+func (tr *RedisCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this RedisCluster using its observed tfState.
+// returns True if there are any spec changes for the resource.
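+// (As configured below, the generic late-initializer applies a zero-value
+// omitempty filter, so zero-valued tfState attributes appear to be skipped
+// rather than copied into spec.forProvider; this reading is inferred from
+// the option name, not stated by the generator.)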
+func (tr *RedisCluster) LateInitialize(attrs []byte) (bool, error) { + params := &RedisClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RedisCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mdb/v1alpha1/zz_rediscluster_types.go b/apis/mdb/v1alpha1/zz_rediscluster_types.go new file mode 100755 index 0000000..4938f54 --- /dev/null +++ b/apis/mdb/v1alpha1/zz_rediscluster_types.go @@ -0,0 +1,669 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RedisClusterAccessInitParameters struct { + + // Allow access for DataLens. Can be either true or false. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + + // Allow access for Web SQL. Can be either true or false. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` +} + +type RedisClusterAccessObservation struct { + + // Allow access for DataLens. Can be either true or false. + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + + // Allow access for Web SQL. Can be either true or false. + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` +} + +type RedisClusterAccessParameters struct { + + // Allow access for DataLens. Can be either true or false. + // +kubebuilder:validation:Optional + DataLens *bool `json:"dataLens,omitempty" tf:"data_lens,omitempty"` + + // Allow access for Web SQL. Can be either true or false. + // +kubebuilder:validation:Optional + WebSQL *bool `json:"webSql,omitempty" tf:"web_sql,omitempty"` +} + +type RedisClusterConfigInitParameters struct { + + // Normal clients output buffer limits. See redis config file. + ClientOutputBufferLimitNormal *string `json:"clientOutputBufferLimitNormal,omitempty" tf:"client_output_buffer_limit_normal,omitempty"` + + // Pubsub clients output buffer limits. See redis config file. + ClientOutputBufferLimitPubsub *string `json:"clientOutputBufferLimitPubsub,omitempty" tf:"client_output_buffer_limit_pubsub,omitempty"` + + // Number of databases (changing requires redis-server restart). + Databases *float64 `json:"databases,omitempty" tf:"databases,omitempty"` + + // Redis maxmemory usage in percent + MaxmemoryPercent *float64 `json:"maxmemoryPercent,omitempty" tf:"maxmemory_percent,omitempty"` + + // Redis key eviction policy for a dataset that reaches maximum memory. Can be any of the listed in the official RedisDB documentation. + MaxmemoryPolicy *string `json:"maxmemoryPolicy,omitempty" tf:"maxmemory_policy,omitempty"` + + // Select the events that Redis will notify among a set of classes. + NotifyKeyspaceEvents *string `json:"notifyKeyspaceEvents,omitempty" tf:"notify_keyspace_events,omitempty"` + + // Password for the Redis cluster. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Log slow queries below this number in microseconds. 
+ SlowlogLogSlowerThan *float64 `json:"slowlogLogSlowerThan,omitempty" tf:"slowlog_log_slower_than,omitempty"` + + // Slow queries log length. + SlowlogMaxLen *float64 `json:"slowlogMaxLen,omitempty" tf:"slowlog_max_len,omitempty"` + + // Close the connection after a client is idle for N seconds. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Version of Redis (6.2). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type RedisClusterConfigObservation struct { + + // Normal clients output buffer limits. See redis config file. + ClientOutputBufferLimitNormal *string `json:"clientOutputBufferLimitNormal,omitempty" tf:"client_output_buffer_limit_normal,omitempty"` + + // Pubsub clients output buffer limits. See redis config file. + ClientOutputBufferLimitPubsub *string `json:"clientOutputBufferLimitPubsub,omitempty" tf:"client_output_buffer_limit_pubsub,omitempty"` + + // Number of databases (changing requires redis-server restart). + Databases *float64 `json:"databases,omitempty" tf:"databases,omitempty"` + + // Redis maxmemory usage in percent + MaxmemoryPercent *float64 `json:"maxmemoryPercent,omitempty" tf:"maxmemory_percent,omitempty"` + + // Redis key eviction policy for a dataset that reaches maximum memory. Can be any of the listed in the official RedisDB documentation. + MaxmemoryPolicy *string `json:"maxmemoryPolicy,omitempty" tf:"maxmemory_policy,omitempty"` + + // Select the events that Redis will notify among a set of classes. + NotifyKeyspaceEvents *string `json:"notifyKeyspaceEvents,omitempty" tf:"notify_keyspace_events,omitempty"` + + // Log slow queries below this number in microseconds. + SlowlogLogSlowerThan *float64 `json:"slowlogLogSlowerThan,omitempty" tf:"slowlog_log_slower_than,omitempty"` + + // Slow queries log length. + SlowlogMaxLen *float64 `json:"slowlogMaxLen,omitempty" tf:"slowlog_max_len,omitempty"` + + // Close the connection after a client is idle for N seconds. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Version of Redis (6.2). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type RedisClusterConfigParameters struct { + + // Normal clients output buffer limits. See redis config file. + // +kubebuilder:validation:Optional + ClientOutputBufferLimitNormal *string `json:"clientOutputBufferLimitNormal,omitempty" tf:"client_output_buffer_limit_normal,omitempty"` + + // Pubsub clients output buffer limits. See redis config file. + // +kubebuilder:validation:Optional + ClientOutputBufferLimitPubsub *string `json:"clientOutputBufferLimitPubsub,omitempty" tf:"client_output_buffer_limit_pubsub,omitempty"` + + // Number of databases (changing requires redis-server restart). + // +kubebuilder:validation:Optional + Databases *float64 `json:"databases,omitempty" tf:"databases,omitempty"` + + // Redis maxmemory usage in percent + // +kubebuilder:validation:Optional + MaxmemoryPercent *float64 `json:"maxmemoryPercent,omitempty" tf:"maxmemory_percent,omitempty"` + + // Redis key eviction policy for a dataset that reaches maximum memory. Can be any of the listed in the official RedisDB documentation. + // +kubebuilder:validation:Optional + MaxmemoryPolicy *string `json:"maxmemoryPolicy,omitempty" tf:"maxmemory_policy,omitempty"` + + // Select the events that Redis will notify among a set of classes. 
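+	// (Illustrative values, assuming standard Redis semantics: single-letter
+	// class flags are combined into one string, e.g. "Ex" for keyevent plus
+	// expired-key notifications.)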
+ // +kubebuilder:validation:Optional + NotifyKeyspaceEvents *string `json:"notifyKeyspaceEvents,omitempty" tf:"notify_keyspace_events,omitempty"` + + // Password for the Redis cluster. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Log slow queries below this number in microseconds. + // +kubebuilder:validation:Optional + SlowlogLogSlowerThan *float64 `json:"slowlogLogSlowerThan,omitempty" tf:"slowlog_log_slower_than,omitempty"` + + // Slow queries log length. + // +kubebuilder:validation:Optional + SlowlogMaxLen *float64 `json:"slowlogMaxLen,omitempty" tf:"slowlog_max_len,omitempty"` + + // Close the connection after a client is idle for N seconds. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Version of Redis (6.2). + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type RedisClusterDiskSizeAutoscalingInitParameters struct { + + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` +} + +type RedisClusterDiskSizeAutoscalingObservation struct { + + // Limit of disk size after autoscaling (GiB). + DiskSizeLimit *float64 `json:"diskSizeLimit,omitempty" tf:"disk_size_limit,omitempty"` + + // Immediate autoscaling disk usage (percent). + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + + // Maintenance window autoscaling disk usage (percent). + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` +} + +type RedisClusterDiskSizeAutoscalingParameters struct { + + // Limit of disk size after autoscaling (GiB). + // +kubebuilder:validation:Optional + DiskSizeLimit *float64 `json:"diskSizeLimit" tf:"disk_size_limit,omitempty"` + + // Immediate autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + EmergencyUsageThreshold *float64 `json:"emergencyUsageThreshold,omitempty" tf:"emergency_usage_threshold,omitempty"` + + // Maintenance window autoscaling disk usage (percent). + // +kubebuilder:validation:Optional + PlannedUsageThreshold *float64 `json:"plannedUsageThreshold,omitempty" tf:"planned_usage_threshold,omitempty"` +} + +type RedisClusterHostInitParameters struct { + + // Sets whether the host should get a public IP address or not. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Replica priority of a current replica (usable for non-sharded only). + ReplicaPriority *float64 `json:"replicaPriority,omitempty" tf:"replica_priority,omitempty"` + + // The name of the shard to which the host belongs. + ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"` + + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The availability zone where the Redis host will be created. For more information see the official documentation. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type RedisClusterHostObservation struct { + + // Sets whether the host should get a public IP address or not. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // The fully qualified domain name of the host. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Replica priority of a current replica (usable for non-sharded only). + ReplicaPriority *float64 `json:"replicaPriority,omitempty" tf:"replica_priority,omitempty"` + + // The name of the shard to which the host belongs. + ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"` + + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The availability zone where the Redis host will be created. For more information see the official documentation. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type RedisClusterHostParameters struct { + + // Sets whether the host should get a public IP address or not. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Replica priority of a current replica (usable for non-sharded only). + // +kubebuilder:validation:Optional + ReplicaPriority *float64 `json:"replicaPriority,omitempty" tf:"replica_priority,omitempty"` + + // The name of the shard to which the host belongs. + // +kubebuilder:validation:Optional + ShardName *string `json:"shardName,omitempty" tf:"shard_name,omitempty"` + + // The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The availability zone where the Redis host will be created. For more information see the official documentation. + // +kubebuilder:validation:Optional + Zone *string `json:"zone" tf:"zone,omitempty"` +} + +type RedisClusterInitParameters struct { + + // Access policy to the Redis cluster. The structure is documented below. + Access []RedisClusterAccessInitParameters `json:"access,omitempty" tf:"access,omitempty"` + + // Announce fqdn instead of ip address. 
+ AnnounceHostnames *bool `json:"announceHostnames,omitempty" tf:"announce_hostnames,omitempty"` + + // Configuration of the Redis cluster. The structure is documented below. + Config []RedisClusterConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Redis cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + DiskSizeAutoscaling []RedisClusterDiskSizeAutoscalingInitParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"` + + // Deployment environment of the Redis cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A host of the Redis cluster. The structure is documented below. + Host []RedisClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"` + + // A set of key/value label pairs to assign to the Redis cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + MaintenanceWindow []RedisClusterMaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the Redis cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the Redis cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // Persistence mode. + PersistenceMode *string `json:"persistenceMode,omitempty" tf:"persistence_mode,omitempty"` + + // Resources allocated to hosts of the Redis cluster. The structure is documented below. + Resources []RedisClusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // A set of ids of security groups assigned to hosts of the cluster. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to SecurityGroup in vpc to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in vpc to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"` + + // Redis Cluster mode enabled/disabled. Enables sharding when cluster non-sharded. If cluster is sharded - disabling is not allowed. + Sharded *bool `json:"sharded,omitempty" tf:"sharded,omitempty"` + + // TLS support mode enabled/disabled. + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` +} + +type RedisClusterMaintenanceWindowInitParameters struct { + + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedisClusterMaintenanceWindowObservation struct { + + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedisClusterMaintenanceWindowParameters struct { + + // Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN. + // +kubebuilder:validation:Optional + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RedisClusterObservation struct { + + // Access policy to the Redis cluster. The structure is documented below. + Access []RedisClusterAccessObservation `json:"access,omitempty" tf:"access,omitempty"` + + // Announce fqdn instead of ip address. + AnnounceHostnames *bool `json:"announceHostnames,omitempty" tf:"announce_hostnames,omitempty"` + + // Configuration of the Redis cluster. The structure is documented below. + Config []RedisClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"` + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Redis cluster. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + DiskSizeAutoscaling []RedisClusterDiskSizeAutoscalingObservation `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"` + + // Deployment environment of the Redis cluster. Can be either PRESTABLE or PRODUCTION. + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation. + Health *string `json:"health,omitempty" tf:"health,omitempty"` + + // A host of the Redis cluster. The structure is documented below. + Host []RedisClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Redis cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + MaintenanceWindow []RedisClusterMaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the Redis cluster. Provided by the client when the cluster is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the Redis cluster belongs. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Persistence mode. + PersistenceMode *string `json:"persistenceMode,omitempty" tf:"persistence_mode,omitempty"` + + // Resources allocated to hosts of the Redis cluster. The structure is documented below. + Resources []RedisClusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + + // A set of ids of security groups assigned to hosts of the cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Redis Cluster mode enabled/disabled. Enables sharding when cluster non-sharded. If cluster is sharded - disabling is not allowed. + Sharded *bool `json:"sharded,omitempty" tf:"sharded,omitempty"` + + // Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // TLS support mode enabled/disabled. + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` +} + +type RedisClusterParameters struct { + + // Access policy to the Redis cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Access []RedisClusterAccessParameters `json:"access,omitempty" tf:"access,omitempty"` + + // Announce fqdn instead of ip address. + // +kubebuilder:validation:Optional + AnnounceHostnames *bool `json:"announceHostnames,omitempty" tf:"announce_hostnames,omitempty"` + + // Configuration of the Redis cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Config []RedisClusterConfigParameters `json:"config,omitempty" tf:"config,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. 
+ // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Redis cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // +kubebuilder:validation:Optional + DiskSizeAutoscaling []RedisClusterDiskSizeAutoscalingParameters `json:"diskSizeAutoscaling,omitempty" tf:"disk_size_autoscaling,omitempty"` + + // Deployment environment of the Redis cluster. Can be either PRESTABLE or PRODUCTION. + // +kubebuilder:validation:Optional + Environment *string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A host of the Redis cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Host []RedisClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"` + + // A set of key/value label pairs to assign to the Redis cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // +kubebuilder:validation:Optional + MaintenanceWindow []RedisClusterMaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Name of the Redis cluster. Provided by the client when the cluster is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network, to which the Redis cluster belongs. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // Persistence mode. + // +kubebuilder:validation:Optional + PersistenceMode *string `json:"persistenceMode,omitempty" tf:"persistence_mode,omitempty"` + + // Resources allocated to hosts of the Redis cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []RedisClusterResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // A set of ids of security groups assigned to hosts of the cluster. 
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+	// Redis Cluster mode enabled/disabled. Enables sharding when cluster non-sharded. If cluster is sharded - disabling is not allowed.
+	// +kubebuilder:validation:Optional
+	Sharded *bool `json:"sharded,omitempty" tf:"sharded,omitempty"`
+
+	// TLS support mode enabled/disabled.
+	// +kubebuilder:validation:Optional
+	TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"`
+}
+
+type RedisClusterResourcesInitParameters struct {
+
+	// Volume of the storage available to a host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+
+	// Type of the storage of Redis hosts - environment default is used if missing.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+}
+
+type RedisClusterResourcesObservation struct {
+
+	// Volume of the storage available to a host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+
+	// Type of the storage of Redis hosts - environment default is used if missing.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+}
+
+type RedisClusterResourcesParameters struct {
+
+	// Volume of the storage available to a host, in gigabytes.
+	// +kubebuilder:validation:Optional
+	DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
+
+	// Type of the storage of Redis hosts - environment default is used if missing.
+	// +kubebuilder:validation:Optional
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+}
+
+// RedisClusterSpec defines the desired state of RedisCluster
+type RedisClusterSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     RedisClusterParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider RedisClusterInitParameters `json:"initProvider,omitempty"`
+}
+
+// RedisClusterStatus defines the observed state of RedisCluster.
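+// AtProvider mirrors the observed Terraform state; it is populated by the
+// SetObservation method in zz_rediscluster_terraformed.go above.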
+type RedisClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RedisClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// RedisCluster is the Schema for the RedisClusters API. Manages a Redis cluster within Yandex.Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type RedisCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resources) || (has(self.initProvider) && has(self.initProvider.resources))",message="spec.forProvider.resources is a required parameter" + Spec RedisClusterSpec `json:"spec"` + Status RedisClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RedisClusterList contains a list of RedisClusters +type RedisClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RedisCluster `json:"items"` +} + +// Repository type metadata. +var ( + RedisCluster_Kind = "RedisCluster" + RedisCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RedisCluster_Kind}.String() + RedisCluster_KindAPIVersion = RedisCluster_Kind + "." 
+ CRDGroupVersion.String()
+	RedisCluster_GroupVersionKind = CRDGroupVersion.WithKind(RedisCluster_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&RedisCluster{}, &RedisClusterList{})
+}
diff --git a/apis/mdb/v1alpha1/zz_sqlservercluster_terraformed.go b/apis/mdb/v1alpha1/zz_sqlservercluster_terraformed.go
new file mode 100755
index 0000000..05b1ec2
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_sqlservercluster_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this SqlserverCluster
+func (mg *SqlserverCluster) GetTerraformResourceType() string {
+	return "yandex_mdb_sqlserver_cluster"
+}
+
+// GetConnectionDetailsMapping for this SqlserverCluster
+func (tr *SqlserverCluster) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"user[*].password": "user[*].passwordSecretRef"}
+}
+
+// GetObservation of this SqlserverCluster
+func (tr *SqlserverCluster) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this SqlserverCluster
+func (tr *SqlserverCluster) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SqlserverCluster
+func (tr *SqlserverCluster) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SqlserverCluster
+func (tr *SqlserverCluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SqlserverCluster
+func (tr *SqlserverCluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SqlserverCluster
+func (tr *SqlserverCluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SqlserverCluster
+func (tr *SqlserverCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
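+	// Hypothetical illustration of the resulting semantics (values invented):
+	//   forProvider {name: "a"} merged with initProvider {name: "b", environment: "PRESTABLE"}
+	//   yields {name: "a", environment: "PRESTABLE"} - forProvider wins on conflicts.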
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SqlserverCluster using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SqlserverCluster) LateInitialize(attrs []byte) (bool, error) {
+	params := &SqlserverClusterParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SqlserverCluster) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/mdb/v1alpha1/zz_sqlservercluster_types.go b/apis/mdb/v1alpha1/zz_sqlservercluster_types.go
new file mode 100755
index 0000000..355a2ff
--- /dev/null
+++ b/apis/mdb/v1alpha1/zz_sqlservercluster_types.go
@@ -0,0 +1,546 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type SqlserverClusterBackupWindowStartInitParameters struct {
+
+	// The hour at which backup will be started.
+	Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`
+
+	// The minute at which backup will be started.
+	Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
+}
+
+type SqlserverClusterBackupWindowStartObservation struct {
+
+	// The hour at which backup will be started.
+	Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`
+
+	// The minute at which backup will be started.
+	Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
+}
+
+type SqlserverClusterBackupWindowStartParameters struct {
+
+	// The hour at which backup will be started.
+	// +kubebuilder:validation:Optional
+	Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"`
+
+	// The minute at which backup will be started.
+	// +kubebuilder:validation:Optional
+	Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"`
+}
+
+type SqlserverClusterDatabaseInitParameters struct {
+
+	// The name of the database.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type SqlserverClusterDatabaseObservation struct {
+
+	// The name of the database.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type SqlserverClusterDatabaseParameters struct {
+
+	// The name of the database.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+}
+
+type SqlserverClusterHostInitParameters struct {
+
+	// Sets whether the host should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+
+	// The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+	// The availability zone where the SQLServer host will be created.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type SqlserverClusterHostObservation struct {
+
+	// Sets whether the host should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment.
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+
+	// (Computed) The fully qualified domain name of the host.
+	Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+	// The ID of the subnet to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// The availability zone where the SQLServer host will be created.
+	Zone *string `json:"zone,omitempty" tf:"zone,omitempty"`
+}
+
+type SqlserverClusterHostParameters struct {
+
+	// Sets whether the host should get a public IP address on creation. Changing this parameter for an existing host is not supported at the moment.
+	// +kubebuilder:validation:Optional
+	AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"`
+
+	// The ID of the subnet to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +kubebuilder:validation:Optional
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+
+	// The availability zone where the SQLServer host will be created.
+	// +kubebuilder:validation:Optional
+	Zone *string `json:"zone" tf:"zone,omitempty"`
+}
+
+type SqlserverClusterInitParameters struct {
+
+	// Time to start the daily backup, in UTC. The structure is documented below.
+	BackupWindowStart []SqlserverClusterBackupWindowStartInitParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+
+	// A database of the SQLServer cluster. The structure is documented below.
+	Database []SqlserverClusterDatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"`
+
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Description of the SQLServer cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Deployment environment of the SQLServer cluster. (PRODUCTION, PRESTABLE)
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A host of the SQLServer cluster. The structure is documented below.
+	Host []SqlserverClusterHostInitParameters `json:"host,omitempty" tf:"host,omitempty"`
+
+	// A list of IDs of the host groups hosting VMs of the cluster.
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+
+	// A set of key/value label pairs to assign to the SQLServer cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the SQLServer cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the network to which the SQLServer cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+	// Resources allocated to hosts of the SQLServer cluster. The structure is documented below.
+	Resources []SqlserverClusterResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+	// SQL Collation the cluster will be created with. This attribute cannot be changed after the cluster is created!
+	Sqlcollation *string `json:"sqlcollation,omitempty" tf:"sqlcollation,omitempty"`
+
+	// SQLServer cluster config. Detailed info in the "SQLServer config" section (documented below).
+	// +mapType=granular
+	SqlserverConfig map[string]*string `json:"sqlserverConfig,omitempty" tf:"sqlserver_config,omitempty"`
+
+	// A user of the SQLServer cluster. The structure is documented below.
+	User []SqlserverClusterUserInitParameters `json:"user,omitempty" tf:"user,omitempty"`
+
+	// Version of the SQLServer cluster. (2016sp2std, 2016sp2ent)
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type SqlserverClusterObservation struct {
+
+	// Time to start the daily backup, in UTC. The structure is documented below.
+	BackupWindowStart []SqlserverClusterBackupWindowStartObservation `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+
+	// Creation timestamp of the cluster.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// A database of the SQLServer cluster. The structure is documented below.
+	Database []SqlserverClusterDatabaseObservation `json:"database,omitempty" tf:"database,omitempty"`
+
+	// Inhibits deletion of the cluster. Can be either true or false.
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Description of the SQLServer cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Deployment environment of the SQLServer cluster. (PRODUCTION, PRESTABLE)
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Aggregated health of the cluster.
+	Health *string `json:"health,omitempty" tf:"health,omitempty"`
+
+	// A host of the SQLServer cluster. The structure is documented below.
+	Host []SqlserverClusterHostObservation `json:"host,omitempty" tf:"host,omitempty"`
+
+	// A list of IDs of the host groups hosting VMs of the cluster.
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// A set of key/value label pairs to assign to the SQLServer cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the SQLServer cluster. Provided by the client when the cluster is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the network to which the SQLServer cluster belongs.
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+	// Resources allocated to hosts of the SQLServer cluster. The structure is documented below.
+	Resources []SqlserverClusterResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"`
+
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// SQL Collation the cluster will be created with. This attribute cannot be changed after the cluster is created!
+	Sqlcollation *string `json:"sqlcollation,omitempty" tf:"sqlcollation,omitempty"`
+
+	// SQLServer cluster config. Detailed info in the "SQLServer config" section (documented below).
+	// +mapType=granular
+	SqlserverConfig map[string]*string `json:"sqlserverConfig,omitempty" tf:"sqlserver_config,omitempty"`
+
+	// Status of the cluster.
+	Status *string `json:"status,omitempty" tf:"status,omitempty"`
+
+	// A user of the SQLServer cluster. The structure is documented below.
+	User []SqlserverClusterUserObservation `json:"user,omitempty" tf:"user,omitempty"`
+
+	// Version of the SQLServer cluster. (2016sp2std, 2016sp2ent)
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type SqlserverClusterParameters struct {
+
+	// Time to start the daily backup, in UTC. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	BackupWindowStart []SqlserverClusterBackupWindowStartParameters `json:"backupWindowStart,omitempty" tf:"backup_window_start,omitempty"`
+
+	// A database of the SQLServer cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Database []SqlserverClusterDatabaseParameters `json:"database,omitempty" tf:"database,omitempty"`
+
+	// Inhibits deletion of the cluster. Can be either true or false.
+	// +kubebuilder:validation:Optional
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Description of the SQLServer cluster.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Deployment environment of the SQLServer cluster. (PRODUCTION, PRESTABLE)
+	// +kubebuilder:validation:Optional
+	Environment *string `json:"environment,omitempty" tf:"environment,omitempty"`
+
+	// The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A host of the SQLServer cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Host []SqlserverClusterHostParameters `json:"host,omitempty" tf:"host,omitempty"`
+
+	// A list of IDs of the host groups hosting VMs of the cluster.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"`
+
+	// A set of key/value label pairs to assign to the SQLServer cluster.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the SQLServer cluster. Provided by the client when the cluster is created.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the network to which the SQLServer cluster belongs.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network
+	// +kubebuilder:validation:Optional
+	NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+	// Reference to a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+	// Selector for a Network in vpc to populate networkId.
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+	// Resources allocated to hosts of the SQLServer cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Resources []SqlserverClusterResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"`
+
+	// A set of ids of security groups assigned to hosts of the cluster.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.SecurityGroup
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// References to SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsRefs []v1.Reference `json:"securityGroupIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of SecurityGroup in vpc to populate securityGroupIds.
+	// +kubebuilder:validation:Optional
+	SecurityGroupIdsSelector *v1.Selector `json:"securityGroupIdsSelector,omitempty" tf:"-"`
+
+	// SQL Collation the cluster will be created with. This attribute cannot be changed after the cluster is created!
+	// +kubebuilder:validation:Optional
+	Sqlcollation *string `json:"sqlcollation,omitempty" tf:"sqlcollation,omitempty"`
+
+	// SQLServer cluster config. Detailed info in the "SQLServer config" section (documented below).
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	SqlserverConfig map[string]*string `json:"sqlserverConfig,omitempty" tf:"sqlserver_config,omitempty"`
+
+	// A user of the SQLServer cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	User []SqlserverClusterUserParameters `json:"user,omitempty" tf:"user,omitempty"`
+
+	// Version of the SQLServer cluster. (2016sp2std, 2016sp2ent)
+	// +kubebuilder:validation:Optional
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type SqlserverClusterResourcesInitParameters struct {
+
+	// Volume of the storage available to a SQLServer host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+
+	// Type of the storage of SQLServer hosts.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+}
+
+type SqlserverClusterResourcesObservation struct {
+
+	// Volume of the storage available to a SQLServer host, in gigabytes.
+	DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"`
+
+	// Type of the storage of SQLServer hosts.
+	DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"`
+
+	ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"`
+}
+
+type SqlserverClusterResourcesParameters struct {
+
+	// Volume of the storage available to a SQLServer host, in gigabytes.
+	// +kubebuilder:validation:Optional
+	DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"`
+
+	// Type of the storage of SQLServer hosts.
+	// +kubebuilder:validation:Optional
+	DiskTypeID *string `json:"diskTypeId" tf:"disk_type_id,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"`
+}
+
+type SqlserverClusterUserInitParameters struct {
+
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The password of the user.
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []SqlserverClusterUserPermissionInitParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+}
+
+type SqlserverClusterUserObservation struct {
+
+	// The name of the user.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Set of permissions granted to the user. The structure is documented below.
+	Permission []SqlserverClusterUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"`
+}
+
+type SqlserverClusterUserParameters struct {
+
+	// The name of the user.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// The password of the user.
+	// +kubebuilder:validation:Optional
+	PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"`
+
+	// Set of permissions granted to the user. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	Permission []SqlserverClusterUserPermissionParameters `json:"permission,omitempty" tf:"permission,omitempty"`
+}
+
+type SqlserverClusterUserPermissionInitParameters struct {
+
+	// The name of the database that the permission grants access to.
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+
+	// List of the user's roles in the database. Allowed roles: OWNER, SECURITYADMIN, ACCESSADMIN, BACKUPOPERATOR, DDLADMIN, DATAWRITER, DATAREADER, DENYDATAWRITER, DENYDATAREADER.
+	// +listType=set
+	Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
+}
+
+type SqlserverClusterUserPermissionObservation struct {
+
+	// The name of the database that the permission grants access to.
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+
+	// List of the user's roles in the database. Allowed roles: OWNER, SECURITYADMIN, ACCESSADMIN, BACKUPOPERATOR, DDLADMIN, DATAWRITER, DATAREADER, DENYDATAWRITER, DENYDATAREADER.
+	// +listType=set
+	Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
+}
+
+type SqlserverClusterUserPermissionParameters struct {
+
+	// The name of the database that the permission grants access to.
+	// +kubebuilder:validation:Optional
+	DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"`
+
+	// List of the user's roles in the database. Allowed roles: OWNER, SECURITYADMIN, ACCESSADMIN, BACKUPOPERATOR, DDLADMIN, DATAWRITER, DATAREADER, DENYDATAWRITER, DENYDATAREADER.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"`
+}
+
+// SqlserverClusterSpec defines the desired state of SqlserverCluster
+type SqlserverClusterSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider SqlserverClusterParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller, such as an autoscaler, is
+	// managing them.
+	InitProvider SqlserverClusterInitParameters `json:"initProvider,omitempty"`
+}
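
A minimal sketch (editorial, not part of the diff) of a SqlserverCluster built against these types, setting every field that the XValidation rules on the SqlserverCluster type below require (database, environment, host, name, resources, user, version). All concrete values are placeholders, and the Name field on the database block is assumed from the pattern of the other blocks:

// Assumes this package and its imports (metav1, v1) are in scope.
func exampleSqlserverCluster() *SqlserverCluster {
	s := func(v string) *string { return &v }
	f := func(v float64) *float64 { return &v }
	return &SqlserverCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sqlserver"},
		Spec: SqlserverClusterSpec{
			ForProvider: SqlserverClusterParameters{
				Name:        s("example-sqlserver"),
				Environment: s("PRESTABLE"),
				Version:     s("2016sp2std"),
				Database:    []SqlserverClusterDatabaseParameters{{Name: s("db1")}}, // Name field assumed
				Host:        []SqlserverClusterHostParameters{{Zone: s("ru-central1-a")}},
				Resources: []SqlserverClusterResourcesParameters{{
					DiskSize:         f(10),
					DiskTypeID:       s("network-ssd"),
					ResourcePresetID: s("s2.small"),
				}},
				User: []SqlserverClusterUserParameters{{
					Name: s("user1"),
					PasswordSecretRef: v1.SecretKeySelector{
						SecretReference: v1.SecretReference{
							Name:      "sqlserver-creds",
							Namespace: "crossplane-system",
						},
						Key: "password",
					},
				}},
				// networkId is typically populated via reference resolution.
				NetworkIDRef: &v1.Reference{Name: "example-network"},
			},
		},
	}
}

+
+// SqlserverClusterStatus defines the observed state of SqlserverCluster.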
+type SqlserverClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SqlserverClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SqlserverCluster is the Schema for the SqlserverClusters API. Manages a Microsoft SQLServer cluster within Yandex.Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SqlserverCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.database) || (has(self.initProvider) && has(self.initProvider.database))",message="spec.forProvider.database is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.environment) || (has(self.initProvider) && has(self.initProvider.environment))",message="spec.forProvider.environment is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.host) || (has(self.initProvider) && has(self.initProvider.host))",message="spec.forProvider.host is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resources) || (has(self.initProvider) && has(self.initProvider.resources))",message="spec.forProvider.resources is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.user) || (has(self.initProvider) && has(self.initProvider.user))",message="spec.forProvider.user is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec SqlserverClusterSpec `json:"spec"` + Status SqlserverClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SqlserverClusterList contains a list of SqlserverClusters +type SqlserverClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]SqlserverCluster `json:"items"` +} + +// Repository type metadata. +var ( + SqlserverCluster_Kind = "SqlserverCluster" + SqlserverCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SqlserverCluster_Kind}.String() + SqlserverCluster_KindAPIVersion = SqlserverCluster_Kind + "." + CRDGroupVersion.String() + SqlserverCluster_GroupVersionKind = CRDGroupVersion.WithKind(SqlserverCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&SqlserverCluster{}, &SqlserverClusterList{}) +} diff --git a/apis/message/v1alpha1/zz_generated.conversion_hubs.go b/apis/message/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..3ed3128 --- /dev/null +++ b/apis/message/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Queue) Hub() {} diff --git a/apis/message/v1alpha1/zz_generated.deepcopy.go b/apis/message/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..5a948f3 --- /dev/null +++ b/apis/message/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,369 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Queue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.AccessKeyRef != nil { + in, out := &in.AccessKeyRef, &out.AccessKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessKeySelector != nil { + in, out := &in.AccessKeySelector, &out.AccessKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ContentBasedDeduplication != nil { + in, out := &in.ContentBasedDeduplication, &out.ContentBasedDeduplication + *out = new(bool) + **out = **in + } + if in.DelaySeconds != nil { + in, out := &in.DelaySeconds, &out.DelaySeconds + *out = new(float64) + **out = **in + } + if in.FifoQueue != nil { + in, out := &in.FifoQueue, &out.FifoQueue + *out = new(bool) + **out = **in + } + if in.MaxMessageSize != nil { + in, out := &in.MaxMessageSize, &out.MaxMessageSize + *out = new(float64) + **out = **in + } + if in.MessageRetentionSeconds != nil { + in, out := &in.MessageRetentionSeconds, &out.MessageRetentionSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } + if in.ReceiveWaitTimeSeconds != nil { + in, out := &in.ReceiveWaitTimeSeconds, &out.ReceiveWaitTimeSeconds + *out = new(float64) + **out = **in + } + if in.RedrivePolicy != nil { + in, out := &in.RedrivePolicy, &out.RedrivePolicy + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.SecretKeySecretRef != nil { + in, out := &in.SecretKeySecretRef, &out.SecretKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.VisibilityTimeoutSeconds != nil { + in, out := &in.VisibilityTimeoutSeconds, &out.VisibilityTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueInitParameters. +func (in *QueueInitParameters) DeepCopy() *QueueInitParameters { + if in == nil { + return nil + } + out := new(QueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ContentBasedDeduplication != nil { + in, out := &in.ContentBasedDeduplication, &out.ContentBasedDeduplication + *out = new(bool) + **out = **in + } + if in.DelaySeconds != nil { + in, out := &in.DelaySeconds, &out.DelaySeconds + *out = new(float64) + **out = **in + } + if in.FifoQueue != nil { + in, out := &in.FifoQueue, &out.FifoQueue + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxMessageSize != nil { + in, out := &in.MaxMessageSize, &out.MaxMessageSize + *out = new(float64) + **out = **in + } + if in.MessageRetentionSeconds != nil { + in, out := &in.MessageRetentionSeconds, &out.MessageRetentionSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } + if in.ReceiveWaitTimeSeconds != nil { + in, out := &in.ReceiveWaitTimeSeconds, &out.ReceiveWaitTimeSeconds + *out = new(float64) + **out = **in + } + if in.RedrivePolicy != nil { + in, out := &in.RedrivePolicy, &out.RedrivePolicy + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.VisibilityTimeoutSeconds != nil { + in, out := &in.VisibilityTimeoutSeconds, &out.VisibilityTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueObservation. +func (in *QueueObservation) DeepCopy() *QueueObservation { + if in == nil { + return nil + } + out := new(QueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.AccessKeyRef != nil { + in, out := &in.AccessKeyRef, &out.AccessKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessKeySelector != nil { + in, out := &in.AccessKeySelector, &out.AccessKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ContentBasedDeduplication != nil { + in, out := &in.ContentBasedDeduplication, &out.ContentBasedDeduplication + *out = new(bool) + **out = **in + } + if in.DelaySeconds != nil { + in, out := &in.DelaySeconds, &out.DelaySeconds + *out = new(float64) + **out = **in + } + if in.FifoQueue != nil { + in, out := &in.FifoQueue, &out.FifoQueue + *out = new(bool) + **out = **in + } + if in.MaxMessageSize != nil { + in, out := &in.MaxMessageSize, &out.MaxMessageSize + *out = new(float64) + **out = **in + } + if in.MessageRetentionSeconds != nil { + in, out := &in.MessageRetentionSeconds, &out.MessageRetentionSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } + if in.ReceiveWaitTimeSeconds != nil { + in, out := &in.ReceiveWaitTimeSeconds, &out.ReceiveWaitTimeSeconds + *out = new(float64) + **out = **in + } + if in.RedrivePolicy != nil { + in, out := &in.RedrivePolicy, &out.RedrivePolicy + *out = new(string) + **out = **in + } + if in.RegionID != nil { + in, out := &in.RegionID, &out.RegionID + *out = new(string) + **out = **in + } + if in.SecretKeySecretRef != nil { + in, out := &in.SecretKeySecretRef, &out.SecretKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.VisibilityTimeoutSeconds != nil { + in, out := &in.VisibilityTimeoutSeconds, &out.VisibilityTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueParameters. +func (in *QueueParameters) DeepCopy() *QueueParameters { + if in == nil { + return nil + } + out := new(QueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. 
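
A short editorial sketch of why these generated deepcopy functions matter in practice: the types hold only pointer fields, so a plain struct assignment aliases every field, while DeepCopy (via DeepCopyInto) allocates fresh storage; controllers rely on this to mutate objects without corrupting shared caches. The function below assumes fmt is imported:

func deepCopyExample() {
	name := "orders"
	orig := &QueueParameters{Name: &name}

	shallow := *orig          // copies the *string, not the string it points to
	copied := orig.DeepCopy() // allocates a new *string via DeepCopyInto

	*orig.Name = "payments"
	fmt.Println(*shallow.Name) // "payments": the shallow copy sees the mutation
	fmt.Println(*copied.Name)  // "orders": the deep copy is isolated
}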
+func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/message/v1alpha1/zz_generated.resolvers.go b/apis/message/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..55d942d --- /dev/null +++ b/apis/message/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,54 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + common "github.com/tagesjump/provider-upjet-yc/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Queue. +func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccessKey), + Extract: common.ExtractAccessKey(), + Reference: mg.Spec.ForProvider.AccessKeyRef, + Selector: mg.Spec.ForProvider.AccessKeySelector, + To: reference.To{ + List: &v1alpha1.ServiceAccountStaticAccessKeyList{}, + Managed: &v1alpha1.ServiceAccountStaticAccessKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccessKey") + } + mg.Spec.ForProvider.AccessKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccessKeyRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AccessKey), + Extract: common.ExtractAccessKey(), + Reference: mg.Spec.InitProvider.AccessKeyRef, + Selector: mg.Spec.InitProvider.AccessKeySelector, + To: reference.To{ + List: &v1alpha1.ServiceAccountStaticAccessKeyList{}, + Managed: &v1alpha1.ServiceAccountStaticAccessKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AccessKey") + } + mg.Spec.InitProvider.AccessKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AccessKeyRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/message/v1alpha1/zz_groupversion_info.go b/apis/message/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..dbe0b0b --- /dev/null +++ b/apis/message/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=message.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "message.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/message/v1alpha1/zz_queue_terraformed.go b/apis/message/v1alpha1/zz_queue_terraformed.go new file mode 100755 index 0000000..2648515 --- /dev/null +++ b/apis/message/v1alpha1/zz_queue_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Queue +func (mg *Queue) GetTerraformResourceType() string { + return "yandex_message_queue" +} + +// GetConnectionDetailsMapping for this Queue +func (tr *Queue) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"secret_key": "secretKeySecretRef"} +} + +// GetObservation of this Queue +func (tr *Queue) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Queue +func (tr *Queue) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Queue +func (tr *Queue) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Queue +func (tr *Queue) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Queue +func (tr *Queue) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Queue +func (tr *Queue) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Queue +func (tr *Queue) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Queue using its observed tfState. +// returns True if there are any spec changes for the resource. 
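
Before the LateInitialize implementation, a self-contained editorial sketch of the merge semantics that GetMergedParameters above relies on: initProvider values only fill fields the user left unset in forProvider, because Overwrite is forced back to false, and WithSliceDeepCopy makes slices copy over rather than alias. The values are illustrative:

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// What GetParameters / GetInitParameters might return for a Queue.
	params := map[string]any{"name": "orders"}                          // spec.forProvider
	initParams := map[string]any{"name": "ignored", "fifo_queue": true} // spec.initProvider

	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	// forProvider wins on conflicts; initProvider fills the gaps.
	fmt.Println(params) // map[fifo_queue:true name:orders]
}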
+func (tr *Queue) LateInitialize(attrs []byte) (bool, error) {
+	params := &QueueParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Queue) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/message/v1alpha1/zz_queue_types.go b/apis/message/v1alpha1/zz_queue_types.go
new file mode 100755
index 0000000..01c8ef6
--- /dev/null
+++ b/apis/message/v1alpha1/zz_queue_types.go
@@ -0,0 +1,233 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type QueueInitParameters struct {
+
+	// The access key to use when applying changes. If omitted, ymq_access_key specified in provider config is used. For more information see documentation.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccountStaticAccessKey
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractAccessKey()
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Reference to a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeyRef *v1.Reference `json:"accessKeyRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeySelector *v1.Selector `json:"accessKeySelector,omitempty" tf:"-"`
+
+	// Enables content-based deduplication. Can be used only if the queue is FIFO.
+	ContentBasedDeduplication *bool `json:"contentBasedDeduplication,omitempty" tf:"content_based_deduplication,omitempty"`
+
+	// Number of seconds to delay the message from being available for processing. Valid values: from 0 to 900 seconds (15 minutes). Default: 0.
+	DelaySeconds *float64 `json:"delaySeconds,omitempty" tf:"delay_seconds,omitempty"`
+
+	// Whether this queue is FIFO. If this parameter is not used, a standard queue is created. You cannot change the parameter value for a created queue.
+	FifoQueue *bool `json:"fifoQueue,omitempty" tf:"fifo_queue,omitempty"`
+
+	// Maximum message size in bytes. Valid values: from 1024 bytes (1 KB) to 262144 bytes (256 KB). Default: 262144 (256 KB). For more information see documentation.
+	MaxMessageSize *float64 `json:"maxMessageSize,omitempty" tf:"max_message_size,omitempty"`
+
+	// The length of time in seconds to retain a message. Valid values: from 60 seconds (1 minute) to 1209600 seconds (14 days). Default: 345600 (4 days). For more information see documentation.
+	MessageRetentionSeconds *float64 `json:"messageRetentionSeconds,omitempty" tf:"message_retention_seconds,omitempty"`
+
+	// Queue name. The maximum length is 80 characters. You can use numbers, letters, underscores, and hyphens in the name. The name of a FIFO queue must end with the .fifo suffix. If not specified, a random name will be generated. Conflicts with name_prefix. For more information see documentation.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Generates a random name with the specified prefix. Conflicts with name.
+	NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"`
+
+	// Wait time for the ReceiveMessage method (for long polling), in seconds. Valid values: from 0 to 20 seconds. Default: 0. For more information about long polling see documentation.
+	ReceiveWaitTimeSeconds *float64 `json:"receiveWaitTimeSeconds,omitempty" tf:"receive_wait_time_seconds,omitempty"`
+
+	// Message redrive policy in Dead Letter Queue. The source queue and DLQ must be the same type: for FIFO queues, the DLQ must also be a FIFO queue. For more information about the redrive policy see documentation. You can also use the example on this page.
+	RedrivePolicy *string `json:"redrivePolicy,omitempty" tf:"redrive_policy,omitempty"`
+
+	// ID of the region where the message queue is located. The default is 'ru-central1'.
+	RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"`
+
+	// The secret key to use when applying changes. If omitted, ymq_secret_key specified in provider config is used. For more information see documentation.
+	SecretKeySecretRef *v1.SecretKeySelector `json:"secretKeySecretRef,omitempty" tf:"-"`
+
+	// Visibility timeout for messages in a queue, specified in seconds. Valid values: from 0 to 43200 seconds (12 hours). Default: 30.
+	VisibilityTimeoutSeconds *float64 `json:"visibilityTimeoutSeconds,omitempty" tf:"visibility_timeout_seconds,omitempty"`
+}
+
+type QueueObservation struct {
+
+	// The access key to use when applying changes. If omitted, ymq_access_key specified in provider config is used. For more information see documentation.
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// ARN of the Yandex Message Queue. It is used for setting up a redrive policy. See documentation.
+	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+	// Enables content-based deduplication. Can be used only if the queue is FIFO.
+	ContentBasedDeduplication *bool `json:"contentBasedDeduplication,omitempty" tf:"content_based_deduplication,omitempty"`
+
+	// Number of seconds to delay the message from being available for processing. Valid values: from 0 to 900 seconds (15 minutes). Default: 0.
+	DelaySeconds *float64 `json:"delaySeconds,omitempty" tf:"delay_seconds,omitempty"`
+
+	// Whether this queue is FIFO. If this parameter is not used, a standard queue is created. You cannot change the parameter value for a created queue.
+	FifoQueue *bool `json:"fifoQueue,omitempty" tf:"fifo_queue,omitempty"`
+
+	// URL of the Yandex Message Queue.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Maximum message size in bytes. Valid values: from 1024 bytes (1 KB) to 262144 bytes (256 KB). Default: 262144 (256 KB). For more information see documentation.
+	MaxMessageSize *float64 `json:"maxMessageSize,omitempty" tf:"max_message_size,omitempty"`
+
+	// The length of time in seconds to retain a message. Valid values: from 60 seconds (1 minute) to 1209600 seconds (14 days). Default: 345600 (4 days). For more information see documentation.
+	MessageRetentionSeconds *float64 `json:"messageRetentionSeconds,omitempty" tf:"message_retention_seconds,omitempty"`
+
+	// Queue name. The maximum length is 80 characters. You can use numbers, letters, underscores, and hyphens in the name. The name of a FIFO queue must end with the .fifo suffix. If not specified, a random name will be generated. Conflicts with name_prefix. For more information see documentation.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Generates a random name with the specified prefix. Conflicts with name.
+	NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"`
+
+	// Wait time for the ReceiveMessage method (for long polling), in seconds. Valid values: from 0 to 20 seconds. Default: 0. For more information about long polling see documentation.
+	ReceiveWaitTimeSeconds *float64 `json:"receiveWaitTimeSeconds,omitempty" tf:"receive_wait_time_seconds,omitempty"`
+
+	// Message redrive policy in Dead Letter Queue. The source queue and DLQ must be the same type: for FIFO queues, the DLQ must also be a FIFO queue. For more information about the redrive policy see documentation. You can also use the example on this page.
+	RedrivePolicy *string `json:"redrivePolicy,omitempty" tf:"redrive_policy,omitempty"`
+
+	// ID of the region where the message queue is located. The default is 'ru-central1'.
+	RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"`
+
+	// Visibility timeout for messages in a queue, specified in seconds. Valid values: from 0 to 43200 seconds (12 hours). Default: 30.
+	VisibilityTimeoutSeconds *float64 `json:"visibilityTimeoutSeconds,omitempty" tf:"visibility_timeout_seconds,omitempty"`
+}
+
+type QueueParameters struct {
+
+	// The access key to use when applying changes. If omitted, ymq_access_key specified in provider config is used. For more information see documentation.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccountStaticAccessKey
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractAccessKey()
+	// +kubebuilder:validation:Optional
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Reference to a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeyRef *v1.Reference `json:"accessKeyRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeySelector *v1.Selector `json:"accessKeySelector,omitempty" tf:"-"`
+
+	// Enables content-based deduplication. Can be used only if the queue is FIFO.
+	// +kubebuilder:validation:Optional
+	ContentBasedDeduplication *bool `json:"contentBasedDeduplication,omitempty" tf:"content_based_deduplication,omitempty"`
+
+	// Number of seconds to delay the message from being available for processing. Valid values: from 0 to 900 seconds (15 minutes). Default: 0.
+	// +kubebuilder:validation:Optional
+	DelaySeconds *float64 `json:"delaySeconds,omitempty" tf:"delay_seconds,omitempty"`
+
+	// Whether this queue is FIFO. If this parameter is not used, a standard queue is created. You cannot change the parameter value for a created queue.
+	// +kubebuilder:validation:Optional
+	FifoQueue *bool `json:"fifoQueue,omitempty" tf:"fifo_queue,omitempty"`
+
+	// Maximum message size in bytes. Valid values: from 1024 bytes (1 KB) to 262144 bytes (256 KB). Default: 262144 (256 KB). For more information see documentation.
+	// +kubebuilder:validation:Optional
+	MaxMessageSize *float64 `json:"maxMessageSize,omitempty" tf:"max_message_size,omitempty"`
+
+	// The length of time in seconds to retain a message. Valid values: from 60 seconds (1 minute) to 1209600 seconds (14 days). Default: 345600 (4 days). For more information see documentation.
+	// +kubebuilder:validation:Optional
+	MessageRetentionSeconds *float64 `json:"messageRetentionSeconds,omitempty" tf:"message_retention_seconds,omitempty"`
+
+	// Queue name. The maximum length is 80 characters. You can use numbers, letters, underscores, and hyphens in the name. The name of a FIFO queue must end with the .fifo suffix. If not specified, a random name will be generated. Conflicts with name_prefix. For more information see documentation.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Generates a random name with the specified prefix. Conflicts with name.
+	// +kubebuilder:validation:Optional
+	NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"`
+
+	// Wait time for the ReceiveMessage method (for long polling), in seconds. Valid values: from 0 to 20 seconds. Default: 0. For more information about long polling see documentation.
+	// +kubebuilder:validation:Optional
+	ReceiveWaitTimeSeconds *float64 `json:"receiveWaitTimeSeconds,omitempty" tf:"receive_wait_time_seconds,omitempty"`
+
+	// Message redrive policy in Dead Letter Queue. The source queue and DLQ must be the same type: for FIFO queues, the DLQ must also be a FIFO queue. For more information about the redrive policy see documentation. You can also use the example on this page.
+	// +kubebuilder:validation:Optional
+	RedrivePolicy *string `json:"redrivePolicy,omitempty" tf:"redrive_policy,omitempty"`
+
+	// ID of the region where the message queue is located. The default is 'ru-central1'.
+	// +kubebuilder:validation:Optional
+	RegionID *string `json:"regionId,omitempty" tf:"region_id,omitempty"`
+
+	// The secret key to use when applying changes. If omitted, ymq_secret_key specified in provider config is used. For more information see documentation.
+	// +kubebuilder:validation:Optional
+	SecretKeySecretRef *v1.SecretKeySelector `json:"secretKeySecretRef,omitempty" tf:"-"`
+
+	// Visibility timeout for messages in a queue, specified in seconds. Valid values: from 0 to 43200 seconds (12 hours). Default: 30.
+	// +kubebuilder:validation:Optional
+	VisibilityTimeoutSeconds *float64 `json:"visibilityTimeoutSeconds,omitempty" tf:"visibility_timeout_seconds,omitempty"`
+}
+
+// QueueSpec defines the desired state of Queue
+type QueueSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider QueueParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller, such as an autoscaler, is
+	// managing them.
+	InitProvider QueueInitParameters `json:"initProvider,omitempty"`
+}
+
+// QueueStatus defines the observed state of Queue.
+type QueueStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider QueueObservation `json:"atProvider,omitempty"`
+}
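
An editorial sketch of a Queue built against these types: a FIFO queue (note the required .fifo name suffix) with a dead-letter redrive policy, whose access key is resolved from a ServiceAccountStaticAccessKey by the generated ResolveReferences. The names, the DLQ ARN, and the redrive policy field names (the SQS-compatible deadLetterTargetArn/maxReceiveCount form) are illustrative assumptions:

// Assumes this package and its imports (metav1, v1) are in scope.
func exampleQueue() *Queue {
	s := func(v string) *string { return &v }
	b := func(v bool) *bool { return &v }
	return &Queue{
		ObjectMeta: metav1.ObjectMeta{Name: "orders"},
		Spec: QueueSpec{
			ForProvider: QueueParameters{
				Name:      s("orders.fifo"),
				FifoQueue: b(true),
				// The DLQ of a FIFO queue must itself be FIFO; ARN is a placeholder.
				RedrivePolicy: s(`{"deadLetterTargetArn":"yrn:yc:ymq:ru-central1:b1gexample:orders-dlq.fifo","maxReceiveCount":3}`),
				// Populated into accessKey by the generated reference resolver.
				AccessKeyRef: &v1.Reference{Name: "queue-sa-key"},
				SecretKeySecretRef: &v1.SecretKeySelector{
					SecretReference: v1.SecretReference{
						Name:      "queue-sa-secret",
						Namespace: "crossplane-system",
					},
					Key: "secretKey",
				},
			},
		},
	}
}

+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Queue is the Schema for the Queues API. Allows management of a Yandex.Cloud Message Queue.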
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Queue struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// QueueList contains a list of Queues +type QueueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Queue `json:"items"` +} + +// Repository type metadata. +var ( + Queue_Kind = "Queue" + Queue_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Queue_Kind}.String() + Queue_KindAPIVersion = Queue_Kind + "." + CRDGroupVersion.String() + Queue_GroupVersionKind = CRDGroupVersion.WithKind(Queue_Kind) +) + +func init() { + SchemeBuilder.Register(&Queue{}, &QueueList{}) +} diff --git a/apis/monitoring/v1alpha1/zz_dashboard_terraformed.go b/apis/monitoring/v1alpha1/zz_dashboard_terraformed.go index 9181180..900edde 100755 --- a/apis/monitoring/v1alpha1/zz_dashboard_terraformed.go +++ b/apis/monitoring/v1alpha1/zz_dashboard_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Dashboard func (mg *Dashboard) GetTerraformResourceType() string { - return "yandex_monitoring_dashboard" + return "yandex_monitoring_dashboard" } // GetConnectionDetailsMapping for this Dashboard func (tr *Dashboard) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Dashboard func (tr *Dashboard) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Dashboard func (tr *Dashboard) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Dashboard func (tr *Dashboard) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Dashboard func (tr *Dashboard) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err 
!= nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Dashboard func (tr *Dashboard) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Dashboard func (tr *Dashboard) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Dashboard func (tr *Dashboard) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Dashboard using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Dashboard) LateInitialize(attrs []byte) (bool, error) { - params := &DashboardParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) 
- return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &DashboardParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Dashboard) GetTerraformSchemaVersion() int { - return 1 + return 1 } diff --git a/apis/monitoring/v1alpha1/zz_dashboard_types.go b/apis/monitoring/v1alpha1/zz_dashboard_types.go index e71b72f..4df46be 100755 --- a/apis/monitoring/v1alpha1/zz_dashboard_types.go +++ b/apis/monitoring/v1alpha1/zz_dashboard_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,1744 +7,1585 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AutomaticInitParameters struct { - } - type AutomaticObservation struct { - } - type AutomaticParameters struct { - } - type ChartInitParameters struct { + // Chart ID. + // Chart ID + ChartID *string `json:"chartId,omitempty" tf:"chart_id,omitempty"` -// Chart ID. -// Chart ID -ChartID *string `json:"chartId,omitempty" tf:"chart_id,omitempty"` - -// Parameter description. -// Chart description in dashboard (not enabled in UI) -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Parameter description. + // Chart description in dashboard (not enabled in UI) + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Enable legend under chart. -// Enable legend under chart -DisplayLegend *bool `json:"displayLegend,omitempty" tf:"display_legend,omitempty"` + // Enable legend under chart. + // Enable legend under chart + DisplayLegend *bool `json:"displayLegend,omitempty" tf:"display_legend,omitempty"` -// Fixed time interval for chart. Values: -// Fixed time interval for chart -Freeze *string `json:"freeze,omitempty" tf:"freeze,omitempty"` + // Fixed time interval for chart. Values: + // Fixed time interval for chart + Freeze *string `json:"freeze,omitempty" tf:"freeze,omitempty"` -// Names settings. -// Name hiding settings -NameHidingSettings []NameHidingSettingsInitParameters `json:"nameHidingSettings,omitempty" tf:"name_hiding_settings,omitempty"` + // Names settings. + // Name hiding settings + NameHidingSettings []NameHidingSettingsInitParameters `json:"nameHidingSettings,omitempty" tf:"name_hiding_settings,omitempty"` -// Queries settings. -// Queries -Queries []QueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // Queries settings. + // Queries + Queries []QueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` -// Time series settings. -SeriesOverrides []SeriesOverridesInitParameters `json:"seriesOverrides,omitempty" tf:"series_overrides,omitempty"` + // Time series settings. + SeriesOverrides []SeriesOverridesInitParameters `json:"seriesOverrides,omitempty" tf:"series_overrides,omitempty"` -// Title or empty. -// Chart widget title -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Title or empty. + // Chart widget title + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Visualization settings. 
-// Visualization settings -VisualizationSettings []VisualizationSettingsInitParameters `json:"visualizationSettings,omitempty" tf:"visualization_settings,omitempty"` + // Visualization settings. + // Visualization settings + VisualizationSettings []VisualizationSettingsInitParameters `json:"visualizationSettings,omitempty" tf:"visualization_settings,omitempty"` } - type ChartObservation struct { + // Chart ID. + // Chart ID + ChartID *string `json:"chartId,omitempty" tf:"chart_id,omitempty"` -// Chart ID. -// Chart ID -ChartID *string `json:"chartId,omitempty" tf:"chart_id,omitempty"` + // Parameter description. + // Chart description in dashboard (not enabled in UI) + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Parameter description. -// Chart description in dashboard (not enabled in UI) -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Enable legend under chart. + // Enable legend under chart + DisplayLegend *bool `json:"displayLegend,omitempty" tf:"display_legend,omitempty"` -// Enable legend under chart. -// Enable legend under chart -DisplayLegend *bool `json:"displayLegend,omitempty" tf:"display_legend,omitempty"` + // Fixed time interval for chart. Values: + // Fixed time interval for chart + Freeze *string `json:"freeze,omitempty" tf:"freeze,omitempty"` -// Fixed time interval for chart. Values: -// Fixed time interval for chart -Freeze *string `json:"freeze,omitempty" tf:"freeze,omitempty"` + // Names settings. + // Name hiding settings + NameHidingSettings []NameHidingSettingsObservation `json:"nameHidingSettings,omitempty" tf:"name_hiding_settings,omitempty"` -// Names settings. -// Name hiding settings -NameHidingSettings []NameHidingSettingsObservation `json:"nameHidingSettings,omitempty" tf:"name_hiding_settings,omitempty"` + // Queries settings. + // Queries + Queries []QueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` -// Queries settings. -// Queries -Queries []QueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` + // Time series settings. + SeriesOverrides []SeriesOverridesObservation `json:"seriesOverrides,omitempty" tf:"series_overrides,omitempty"` -// Time series settings. -SeriesOverrides []SeriesOverridesObservation `json:"seriesOverrides,omitempty" tf:"series_overrides,omitempty"` + // Title or empty. + // Chart widget title + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Title or empty. -// Chart widget title -Title *string `json:"title,omitempty" tf:"title,omitempty"` - -// Visualization settings. -// Visualization settings -VisualizationSettings []VisualizationSettingsObservation `json:"visualizationSettings,omitempty" tf:"visualization_settings,omitempty"` + // Visualization settings. + // Visualization settings + VisualizationSettings []VisualizationSettingsObservation `json:"visualizationSettings,omitempty" tf:"visualization_settings,omitempty"` } - type ChartParameters struct { + // Chart ID. + // Chart ID + // +kubebuilder:validation:Optional + ChartID *string `json:"chartId,omitempty" tf:"chart_id,omitempty"` -// Chart ID. -// Chart ID -// +kubebuilder:validation:Optional -ChartID *string `json:"chartId,omitempty" tf:"chart_id,omitempty"` + // Parameter description. + // Chart description in dashboard (not enabled in UI) + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Parameter description. 
-// Chart description in dashboard (not enabled in UI) -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Enable legend under chart. + // Enable legend under chart + // +kubebuilder:validation:Optional + DisplayLegend *bool `json:"displayLegend,omitempty" tf:"display_legend,omitempty"` -// Enable legend under chart. -// Enable legend under chart -// +kubebuilder:validation:Optional -DisplayLegend *bool `json:"displayLegend,omitempty" tf:"display_legend,omitempty"` + // Fixed time interval for chart. Values: + // Fixed time interval for chart + // +kubebuilder:validation:Optional + Freeze *string `json:"freeze,omitempty" tf:"freeze,omitempty"` -// Fixed time interval for chart. Values: -// Fixed time interval for chart -// +kubebuilder:validation:Optional -Freeze *string `json:"freeze,omitempty" tf:"freeze,omitempty"` + // Names settings. + // Name hiding settings + // +kubebuilder:validation:Optional + NameHidingSettings []NameHidingSettingsParameters `json:"nameHidingSettings,omitempty" tf:"name_hiding_settings,omitempty"` -// Names settings. -// Name hiding settings -// +kubebuilder:validation:Optional -NameHidingSettings []NameHidingSettingsParameters `json:"nameHidingSettings,omitempty" tf:"name_hiding_settings,omitempty"` + // Queries settings. + // Queries + // +kubebuilder:validation:Optional + Queries []QueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` -// Queries settings. -// Queries -// +kubebuilder:validation:Optional -Queries []QueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // Time series settings. + // +kubebuilder:validation:Optional + SeriesOverrides []SeriesOverridesParameters `json:"seriesOverrides,omitempty" tf:"series_overrides,omitempty"` -// Time series settings. -// +kubebuilder:validation:Optional -SeriesOverrides []SeriesOverridesParameters `json:"seriesOverrides,omitempty" tf:"series_overrides,omitempty"` + // Title or empty. + // Chart widget title + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Title or empty. -// Chart widget title -// +kubebuilder:validation:Optional -Title *string `json:"title,omitempty" tf:"title,omitempty"` - -// Visualization settings. -// Visualization settings -// +kubebuilder:validation:Optional -VisualizationSettings []VisualizationSettingsParameters `json:"visualizationSettings,omitempty" tf:"visualization_settings,omitempty"` + // Visualization settings. + // Visualization settings + // +kubebuilder:validation:Optional + VisualizationSettings []VisualizationSettingsParameters `json:"visualizationSettings,omitempty" tf:"visualization_settings,omitempty"` } - type ColorSchemeSettingsInitParameters struct { + // Automatic color scheme. Oneof: automatic, standard or gradient. + // Automatic color scheme + Automatic []AutomaticInitParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` -// Automatic color scheme. Oneof: automatic, standard or gradient. -// Automatic color scheme -Automatic []AutomaticInitParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` - -// Gradient color scheme. Oneof: automatic, standard or gradient. -// Gradient color scheme -Gradient []GradientInitParameters `json:"gradient,omitempty" tf:"gradient,omitempty"` + // Gradient color scheme. Oneof: automatic, standard or gradient. + // Gradient color scheme + Gradient []GradientInitParameters `json:"gradient,omitempty" tf:"gradient,omitempty"` -// Standard color scheme. 
Oneof: automatic, standard or gradient. -// Standard color scheme -Standard []StandardInitParameters `json:"standard,omitempty" tf:"standard,omitempty"` + // Standard color scheme. Oneof: automatic, standard or gradient. + // Standard color scheme + Standard []StandardInitParameters `json:"standard,omitempty" tf:"standard,omitempty"` } - type ColorSchemeSettingsObservation struct { + // Automatic color scheme. Oneof: automatic, standard or gradient. + // Automatic color scheme + Automatic []AutomaticParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` -// Automatic color scheme. Oneof: automatic, standard or gradient. -// Automatic color scheme -Automatic []AutomaticParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` - -// Gradient color scheme. Oneof: automatic, standard or gradient. -// Gradient color scheme -Gradient []GradientObservation `json:"gradient,omitempty" tf:"gradient,omitempty"` + // Gradient color scheme. Oneof: automatic, standard or gradient. + // Gradient color scheme + Gradient []GradientObservation `json:"gradient,omitempty" tf:"gradient,omitempty"` -// Standard color scheme. Oneof: automatic, standard or gradient. -// Standard color scheme -Standard []StandardParameters `json:"standard,omitempty" tf:"standard,omitempty"` + // Standard color scheme. Oneof: automatic, standard or gradient. + // Standard color scheme + Standard []StandardParameters `json:"standard,omitempty" tf:"standard,omitempty"` } - type ColorSchemeSettingsParameters struct { + // Automatic color scheme. Oneof: automatic, standard or gradient. + // Automatic color scheme + // +kubebuilder:validation:Optional + Automatic []AutomaticParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` -// Automatic color scheme. Oneof: automatic, standard or gradient. -// Automatic color scheme -// +kubebuilder:validation:Optional -Automatic []AutomaticParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` - -// Gradient color scheme. Oneof: automatic, standard or gradient. -// Gradient color scheme -// +kubebuilder:validation:Optional -Gradient []GradientParameters `json:"gradient,omitempty" tf:"gradient,omitempty"` + // Gradient color scheme. Oneof: automatic, standard or gradient. + // Gradient color scheme + // +kubebuilder:validation:Optional + Gradient []GradientParameters `json:"gradient,omitempty" tf:"gradient,omitempty"` -// Standard color scheme. Oneof: automatic, standard or gradient. -// Standard color scheme -// +kubebuilder:validation:Optional -Standard []StandardParameters `json:"standard,omitempty" tf:"standard,omitempty"` + // Standard color scheme. Oneof: automatic, standard or gradient. + // Standard color scheme + // +kubebuilder:validation:Optional + Standard []StandardParameters `json:"standard,omitempty" tf:"standard,omitempty"` } - type CustomInitParameters struct { + // Default value. + // Default value + DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// Default value. -// Default value -DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // Specifies the multiselectable values of parameter. + // Specifies the multiselectable values of parameter + Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` -// Specifies the multiselectable values of parameter. -// Specifies the multiselectable values of parameter -Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` - -// Parameter values. 
-// Parameter values -Values []*string `json:"values,omitempty" tf:"values,omitempty"` + // Parameter values. + // Parameter values + Values []*string `json:"values,omitempty" tf:"values,omitempty"` } - type CustomObservation struct { + // Default value. + // Default value + DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// Default value. -// Default value -DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // Specifies the multiselectable values of parameter. + // Specifies the multiselectable values of parameter + Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` -// Specifies the multiselectable values of parameter. -// Specifies the multiselectable values of parameter -Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` - -// Parameter values. -// Parameter values -Values []*string `json:"values,omitempty" tf:"values,omitempty"` + // Parameter values. + // Parameter values + Values []*string `json:"values,omitempty" tf:"values,omitempty"` } - type CustomParameters struct { + // Default value. + // Default value + // +kubebuilder:validation:Optional + DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// Default value. -// Default value -// +kubebuilder:validation:Optional -DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` - -// Specifies the multiselectable values of parameter. -// Specifies the multiselectable values of parameter -// +kubebuilder:validation:Optional -Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` + // Specifies the multiselectable values of parameter. + // Specifies the multiselectable values of parameter + // +kubebuilder:validation:Optional + Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` -// Parameter values. -// Parameter values -// +kubebuilder:validation:Optional -Values []*string `json:"values,omitempty" tf:"values,omitempty"` + // Parameter values. + // Parameter values + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values,omitempty"` } - type DashboardInitParameters struct { + // Dashboard description. + // Dashboard description + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Dashboard description. -// Dashboard description -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -// Folder ID -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // Folder ID + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the Dashboard. -// Dashboard labels -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Dashboard. + // Dashboard labels + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the Dashboard. -// Dashboard name, used as local identifier in folder_id -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Dashboard. + // Dashboard name, used as local identifier in folder_id + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Dashboard parametrization -// Dashboard parametrization -Parametrization []ParametrizationInitParameters `json:"parametrization,omitempty" tf:"parametrization,omitempty"` + // Dashboard parametrization + // Dashboard parametrization + Parametrization []ParametrizationInitParameters `json:"parametrization,omitempty" tf:"parametrization,omitempty"` -// Dashboard title. -// Dashboard title -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Dashboard title. + // Dashboard title + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Widgets -// Widgets -Widgets []WidgetsInitParameters `json:"widgets,omitempty" tf:"widgets,omitempty"` + // Widgets + // Widgets + Widgets []WidgetsInitParameters `json:"widgets,omitempty" tf:"widgets,omitempty"` } - type DashboardObservation struct { + // Dashboard ID + DashboardID *string `json:"dashboardId,omitempty" tf:"dashboard_id,omitempty"` -// Dashboard ID -DashboardID *string `json:"dashboardId,omitempty" tf:"dashboard_id,omitempty"` - -// Dashboard description. -// Dashboard description -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Dashboard description. + // Dashboard description + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -// Folder ID -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // Folder ID + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Parameter identifier -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Parameter identifier + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Dashboard. -// Dashboard labels -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Dashboard. + // Dashboard labels + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the Dashboard. -// Dashboard name, used as local identifier in folder_id -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the Dashboard. 
+ // Dashboard name, used as local identifier in folder_id + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Dashboard parametrization -// Dashboard parametrization -Parametrization []ParametrizationObservation `json:"parametrization,omitempty" tf:"parametrization,omitempty"` + // Dashboard parametrization + // Dashboard parametrization + Parametrization []ParametrizationObservation `json:"parametrization,omitempty" tf:"parametrization,omitempty"` -// Dashboard title. -// Dashboard title -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Dashboard title. + // Dashboard title + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Widgets -// Widgets -Widgets []WidgetsObservation `json:"widgets,omitempty" tf:"widgets,omitempty"` + // Widgets + // Widgets + Widgets []WidgetsObservation `json:"widgets,omitempty" tf:"widgets,omitempty"` } - type DashboardParameters struct { + // Dashboard description. + // Dashboard description + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Dashboard description. -// Dashboard description -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // Folder ID + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder that the resource belongs to. If value is omitted, the default provider folder is used. -// Folder ID -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // A set of key/value label pairs to assign to the Dashboard. + // Dashboard labels + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the Dashboard. -// Dashboard labels -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Name of the Dashboard. + // Dashboard name, used as local identifier in folder_id + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the Dashboard. 
-// Dashboard name, used as local identifier in folder_id -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Dashboard parametrization + // Dashboard parametrization + // +kubebuilder:validation:Optional + Parametrization []ParametrizationParameters `json:"parametrization,omitempty" tf:"parametrization,omitempty"` -// Dashboard parametrization -// Dashboard parametrization -// +kubebuilder:validation:Optional -Parametrization []ParametrizationParameters `json:"parametrization,omitempty" tf:"parametrization,omitempty"` + // Dashboard title. + // Dashboard title + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Dashboard title. -// Dashboard title -// +kubebuilder:validation:Optional -Title *string `json:"title,omitempty" tf:"title,omitempty"` - -// Widgets -// Widgets -// +kubebuilder:validation:Optional -Widgets []WidgetsParameters `json:"widgets,omitempty" tf:"widgets,omitempty"` + // Widgets + // Widgets + // +kubebuilder:validation:Optional + Widgets []WidgetsParameters `json:"widgets,omitempty" tf:"widgets,omitempty"` } - type DownsamplingInitParameters struct { + // Disable downsampling. + // Disable downsampling + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Disable downsampling. -// Disable downsampling -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Parameters for filling gaps in data. + // Parameters for filling gaps in data + GapFilling *string `json:"gapFilling,omitempty" tf:"gap_filling,omitempty"` -// Parameters for filling gaps in data. -// Parameters for filling gaps in data -GapFilling *string `json:"gapFilling,omitempty" tf:"gap_filling,omitempty"` + // Function that is used for downsampling. + // Function that is used for downsampling + GridAggregation *string `json:"gridAggregation,omitempty" tf:"grid_aggregation,omitempty"` -// Function that is used for downsampling. -// Function that is used for downsampling -GridAggregation *string `json:"gridAggregation,omitempty" tf:"grid_aggregation,omitempty"` + // Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + // Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + GridInterval *float64 `json:"gridInterval,omitempty" tf:"grid_interval,omitempty"` -// Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point -// Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point -GridInterval *float64 `json:"gridInterval,omitempty" tf:"grid_interval,omitempty"` - -// Maximum number of points to be returned. -// Maximum number of points to be returned -MaxPoints *float64 `json:"maxPoints,omitempty" tf:"max_points,omitempty"` + // Maximum number of points to be returned. + // Maximum number of points to be returned + MaxPoints *float64 `json:"maxPoints,omitempty" tf:"max_points,omitempty"` } - type DownsamplingObservation struct { + // Disable downsampling. + // Disable downsampling + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Disable downsampling. -// Disable downsampling -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Parameters for filling gaps in data. 
-// Parameters for filling gaps in data -GapFilling *string `json:"gapFilling,omitempty" tf:"gap_filling,omitempty"` + // Parameters for filling gaps in data. + // Parameters for filling gaps in data + GapFilling *string `json:"gapFilling,omitempty" tf:"gap_filling,omitempty"` -// Function that is used for downsampling. -// Function that is used for downsampling -GridAggregation *string `json:"gridAggregation,omitempty" tf:"grid_aggregation,omitempty"` + // Function that is used for downsampling. + // Function that is used for downsampling + GridAggregation *string `json:"gridAggregation,omitempty" tf:"grid_aggregation,omitempty"` -// Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point -// Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point -GridInterval *float64 `json:"gridInterval,omitempty" tf:"grid_interval,omitempty"` + // Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + // Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + GridInterval *float64 `json:"gridInterval,omitempty" tf:"grid_interval,omitempty"` -// Maximum number of points to be returned. -// Maximum number of points to be returned -MaxPoints *float64 `json:"maxPoints,omitempty" tf:"max_points,omitempty"` + // Maximum number of points to be returned. + // Maximum number of points to be returned + MaxPoints *float64 `json:"maxPoints,omitempty" tf:"max_points,omitempty"` } - type DownsamplingParameters struct { + // Disable downsampling. + // Disable downsampling + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Disable downsampling. -// Disable downsampling -// +kubebuilder:validation:Optional -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Parameters for filling gaps in data. -// Parameters for filling gaps in data -// +kubebuilder:validation:Optional -GapFilling *string `json:"gapFilling,omitempty" tf:"gap_filling,omitempty"` + // Parameters for filling gaps in data. + // Parameters for filling gaps in data + // +kubebuilder:validation:Optional + GapFilling *string `json:"gapFilling,omitempty" tf:"gap_filling,omitempty"` -// Function that is used for downsampling. -// Function that is used for downsampling -// +kubebuilder:validation:Optional -GridAggregation *string `json:"gridAggregation,omitempty" tf:"grid_aggregation,omitempty"` + // Function that is used for downsampling. + // Function that is used for downsampling + // +kubebuilder:validation:Optional + GridAggregation *string `json:"gridAggregation,omitempty" tf:"grid_aggregation,omitempty"` -// Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point -// Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point -// +kubebuilder:validation:Optional -GridInterval *float64 `json:"gridInterval,omitempty" tf:"grid_interval,omitempty"` + // Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + // Time interval (grid) for downsampling in milliseconds. 
Points in the specified range are aggregated into one time point + // +kubebuilder:validation:Optional + GridInterval *float64 `json:"gridInterval,omitempty" tf:"grid_interval,omitempty"` -// Maximum number of points to be returned. -// Maximum number of points to be returned -// +kubebuilder:validation:Optional -MaxPoints *float64 `json:"maxPoints,omitempty" tf:"max_points,omitempty"` + // Maximum number of points to be returned. + // Maximum number of points to be returned + // +kubebuilder:validation:Optional + MaxPoints *float64 `json:"maxPoints,omitempty" tf:"max_points,omitempty"` } - type GradientInitParameters struct { + // Gradient green value. + // Gradient green value + GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` -// Gradient green value. -// Gradient green value -GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` - -// Gradient red value. -// Gradient red value -RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` + // Gradient red value. + // Gradient red value + RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` -// Gradient violet value. -// Gradient violet_value -VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` + // Gradient violet value. + // Gradient violet_value + VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` -// Gradient yellow value. -// Gradient yellow value -YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` + // Gradient yellow value. + // Gradient yellow value + YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` } - type GradientObservation struct { + // Gradient green value. + // Gradient green value + GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` -// Gradient green value. -// Gradient green value -GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` + // Gradient red value. + // Gradient red value + RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` -// Gradient red value. -// Gradient red value -RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` + // Gradient violet value. + // Gradient violet_value + VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` -// Gradient violet value. -// Gradient violet_value -VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` - -// Gradient yellow value. -// Gradient yellow value -YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` + // Gradient yellow value. + // Gradient yellow value + YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` } - type GradientParameters struct { + // Gradient green value. + // Gradient green value + // +kubebuilder:validation:Optional + GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` -// Gradient green value. -// Gradient green value -// +kubebuilder:validation:Optional -GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` + // Gradient red value. + // Gradient red value + // +kubebuilder:validation:Optional + RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` -// Gradient red value. -// Gradient red value -// +kubebuilder:validation:Optional -RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` + // Gradient violet value. 
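
The Downsampling* structs above mirror the downsampling block of the Terraform schema: every field is an optional pointer, grid_interval sets the aggregation grid in milliseconds, and max_points is the alternative cap on returned points. A sketch of how a caller might populate the generated type; the import path follows the reference markers in this file, and the aggregation enum string is an assumption to be checked against the provider documentation:

package main

import (
	"fmt"

	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/monitoring/v1alpha1"
)

func strPtr(s string) *string   { return &s }
func f64Ptr(f float64) *float64 { return &f }

func main() {
	// Aggregate to one point per 15s grid. The GridAggregation value is
	// an assumed enum name, not taken from this file; MaxPoints would be
	// the alternative way to bound the series.
	ds := v1alpha1.DownsamplingInitParameters{
		GridAggregation: strPtr("GRID_AGGREGATION_AVG"),
		GridInterval:    f64Ptr(15000),
	}
	fmt.Printf("%+v\n", ds)
}
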
+ // Gradient violet_value + // +kubebuilder:validation:Optional + VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` -// Gradient violet value. -// Gradient violet_value -// +kubebuilder:validation:Optional -VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` - -// Gradient yellow value. -// Gradient yellow value -// +kubebuilder:validation:Optional -YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` + // Gradient yellow value. + // Gradient yellow value + // +kubebuilder:validation:Optional + YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` } - type HeatmapSettingsInitParameters struct { + // Gradient green value. + // Heatmap green value + GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` -// Gradient green value. -// Heatmap green value -GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` - -// Gradient red value. -// Heatmap red value -RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` + // Gradient red value. + // Heatmap red value + RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` -// Gradient violet value. -// Heatmap violet_value -VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` + // Gradient violet value. + // Heatmap violet_value + VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` -// Gradient yellow value. -// Heatmap yellow value -YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` + // Gradient yellow value. + // Heatmap yellow value + YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` } - type HeatmapSettingsObservation struct { + // Gradient green value. + // Heatmap green value + GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` -// Gradient green value. -// Heatmap green value -GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` - -// Gradient red value. -// Heatmap red value -RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` + // Gradient red value. + // Heatmap red value + RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` -// Gradient violet value. -// Heatmap violet_value -VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` + // Gradient violet value. + // Heatmap violet_value + VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` -// Gradient yellow value. -// Heatmap yellow value -YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` + // Gradient yellow value. + // Heatmap yellow value + YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` } - type HeatmapSettingsParameters struct { + // Gradient green value. + // Heatmap green value + // +kubebuilder:validation:Optional + GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` -// Gradient green value. -// Heatmap green value -// +kubebuilder:validation:Optional -GreenValue *string `json:"greenValue,omitempty" tf:"green_value,omitempty"` - -// Gradient red value. -// Heatmap red value -// +kubebuilder:validation:Optional -RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` + // Gradient red value. + // Heatmap red value + // +kubebuilder:validation:Optional + RedValue *string `json:"redValue,omitempty" tf:"red_value,omitempty"` -// Gradient violet value. 
-// Heatmap violet_value -// +kubebuilder:validation:Optional -VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` + // Gradient violet value. + // Heatmap violet_value + // +kubebuilder:validation:Optional + VioletValue *string `json:"violetValue,omitempty" tf:"violet_value,omitempty"` -// Gradient yellow value. -// Heatmap yellow value -// +kubebuilder:validation:Optional -YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` + // Gradient yellow value. + // Heatmap yellow value + // +kubebuilder:validation:Optional + YellowValue *string `json:"yellowValue,omitempty" tf:"yellow_value,omitempty"` } - type LabelValuesInitParameters struct { + // Default value. + // Default value + DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// Default value. -// Default value -DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // Labels folder ID. + // Folder ID + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Labels folder ID. -// Folder ID -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Label key to list label values. + // Required. Label key to list label values + LabelKey *string `json:"labelKey,omitempty" tf:"label_key,omitempty"` -// Label key to list label values. -// Required. Label key to list label values -LabelKey *string `json:"labelKey,omitempty" tf:"label_key,omitempty"` + // Specifies the multiselectable values of parameter. + // Specifies the multiselectable values of parameter + Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` -// Specifies the multiselectable values of parameter. -// Specifies the multiselectable values of parameter -Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` - -// dashboard predefined parameters selector. -// Required. Selectors to select metric label values -Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` + // dashboard predefined parameters selector. + // Required. Selectors to select metric label values + Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` } - type LabelValuesObservation struct { + // Default value. + // Default value + DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// Default value. -// Default value -DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` + // Labels folder ID. 
+ // Folder ID + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Labels folder ID. -// Folder ID -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Label key to list label values. + // Required. Label key to list label values + LabelKey *string `json:"labelKey,omitempty" tf:"label_key,omitempty"` -// Label key to list label values. -// Required. Label key to list label values -LabelKey *string `json:"labelKey,omitempty" tf:"label_key,omitempty"` + // Specifies the multiselectable values of parameter. + // Specifies the multiselectable values of parameter + Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` -// Specifies the multiselectable values of parameter. -// Specifies the multiselectable values of parameter -Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` - -// dashboard predefined parameters selector. -// Required. Selectors to select metric label values -Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` + // dashboard predefined parameters selector. + // Required. Selectors to select metric label values + Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` } - type LabelValuesParameters struct { + // Default value. + // Default value + // +kubebuilder:validation:Optional + DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` -// Default value. -// Default value -// +kubebuilder:validation:Optional -DefaultValues []*string `json:"defaultValues,omitempty" tf:"default_values,omitempty"` - -// Labels folder ID. -// Folder ID -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Labels folder ID. + // Folder ID + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Label key to list label values. -// Required. Label key to list label values -// +kubebuilder:validation:Optional -LabelKey *string `json:"labelKey" tf:"label_key,omitempty"` + // Label key to list label values. + // Required. Label key to list label values + // +kubebuilder:validation:Optional + LabelKey *string `json:"labelKey" tf:"label_key,omitempty"` -// Specifies the multiselectable values of parameter. -// Specifies the multiselectable values of parameter -// +kubebuilder:validation:Optional -Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` + // Specifies the multiselectable values of parameter. 
+ // Specifies the multiselectable values of parameter + // +kubebuilder:validation:Optional + Multiselectable *bool `json:"multiselectable,omitempty" tf:"multiselectable,omitempty"` -// dashboard predefined parameters selector. -// Required. Selectors to select metric label values -// +kubebuilder:validation:Optional -Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` + // dashboard predefined parameters selector. + // Required. Selectors to select metric label values + // +kubebuilder:validation:Optional + Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` } - type LeftInitParameters struct { + // Max value in extended number format or empty. + // Max value in extended number format or empty + Max *string `json:"max,omitempty" tf:"max,omitempty"` -// Max value in extended number format or empty. -// Max value in extended number format or empty -Max *string `json:"max,omitempty" tf:"max,omitempty"` - -// Min value in extended number format or empty. -// Min value in extended number format or empty -Min *string `json:"min,omitempty" tf:"min,omitempty"` + // Min value in extended number format or empty. + // Min value in extended number format or empty + Min *string `json:"min,omitempty" tf:"min,omitempty"` -// Tick value precision (null as default, 0-7 in other cases). -// Tick value precision (null as default, 0-7 in other cases) -Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` + // Tick value precision (null as default, 0-7 in other cases). + // Tick value precision (null as default, 0-7 in other cases) + Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` -// Title or empty. -// Title or empty -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Title or empty. + // Title or empty + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Type. Values: -// Type -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type. Values: + // Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Unit format. Values: -// Unit format -UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` + // Unit format. Values: + // Unit format + UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` } - type LeftObservation struct { + // Max value in extended number format or empty. + // Max value in extended number format or empty + Max *string `json:"max,omitempty" tf:"max,omitempty"` -// Max value in extended number format or empty. -// Max value in extended number format or empty -Max *string `json:"max,omitempty" tf:"max,omitempty"` - -// Min value in extended number format or empty. -// Min value in extended number format or empty -Min *string `json:"min,omitempty" tf:"min,omitempty"` + // Min value in extended number format or empty. + // Min value in extended number format or empty + Min *string `json:"min,omitempty" tf:"min,omitempty"` -// Tick value precision (null as default, 0-7 in other cases). -// Tick value precision (null as default, 0-7 in other cases) -Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` + // Tick value precision (null as default, 0-7 in other cases). + // Tick value precision (null as default, 0-7 in other cases) + Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` -// Title or empty. -// Title or empty -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Title or empty. 
+ // Title or empty + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Type. Values: -// Type -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type. Values: + // Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Unit format. Values: -// Unit format -UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` + // Unit format. Values: + // Unit format + UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` } - type LeftParameters struct { + // Max value in extended number format or empty. + // Max value in extended number format or empty + // +kubebuilder:validation:Optional + Max *string `json:"max,omitempty" tf:"max,omitempty"` -// Max value in extended number format or empty. -// Max value in extended number format or empty -// +kubebuilder:validation:Optional -Max *string `json:"max,omitempty" tf:"max,omitempty"` + // Min value in extended number format or empty. + // Min value in extended number format or empty + // +kubebuilder:validation:Optional + Min *string `json:"min,omitempty" tf:"min,omitempty"` -// Min value in extended number format or empty. -// Min value in extended number format or empty -// +kubebuilder:validation:Optional -Min *string `json:"min,omitempty" tf:"min,omitempty"` + // Tick value precision (null as default, 0-7 in other cases). + // Tick value precision (null as default, 0-7 in other cases) + // +kubebuilder:validation:Optional + Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` -// Tick value precision (null as default, 0-7 in other cases). -// Tick value precision (null as default, 0-7 in other cases) -// +kubebuilder:validation:Optional -Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` + // Title or empty. + // Title or empty + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Title or empty. -// Title or empty -// +kubebuilder:validation:Optional -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Type. Values: + // Type + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type. Values: -// Type -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Unit format. Values: -// Unit format -// +kubebuilder:validation:Optional -UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` + // Unit format. Values: + // Unit format + // +kubebuilder:validation:Optional + UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` } - type NameHidingSettingsInitParameters struct { + // Series name. + Names []*string `json:"names,omitempty" tf:"names,omitempty"` -// Series name. -Names []*string `json:"names,omitempty" tf:"names,omitempty"` - -// True if we want to show concrete series names only, false if we want to hide concrete series names. -// True if we want to show concrete series names only, false if we want to hide concrete series names -Positive *bool `json:"positive,omitempty" tf:"positive,omitempty"` + // True if we want to show concrete series names only, false if we want to hide concrete series names. + // True if we want to show concrete series names only, false if we want to hide concrete series names + Positive *bool `json:"positive,omitempty" tf:"positive,omitempty"` } - type NameHidingSettingsObservation struct { + // Series name. + Names []*string `json:"names,omitempty" tf:"names,omitempty"` -// Series name. 
-Names []*string `json:"names,omitempty" tf:"names,omitempty"` - -// True if we want to show concrete series names only, false if we want to hide concrete series names. -// True if we want to show concrete series names only, false if we want to hide concrete series names -Positive *bool `json:"positive,omitempty" tf:"positive,omitempty"` + // True if we want to show concrete series names only, false if we want to hide concrete series names. + // True if we want to show concrete series names only, false if we want to hide concrete series names + Positive *bool `json:"positive,omitempty" tf:"positive,omitempty"` } - type NameHidingSettingsParameters struct { + // Series name. + // +kubebuilder:validation:Optional + Names []*string `json:"names,omitempty" tf:"names,omitempty"` -// Series name. -// +kubebuilder:validation:Optional -Names []*string `json:"names,omitempty" tf:"names,omitempty"` - -// True if we want to show concrete series names only, false if we want to hide concrete series names. -// True if we want to show concrete series names only, false if we want to hide concrete series names -// +kubebuilder:validation:Optional -Positive *bool `json:"positive,omitempty" tf:"positive,omitempty"` + // True if we want to show concrete series names only, false if we want to hide concrete series names. + // True if we want to show concrete series names only, false if we want to hide concrete series names + // +kubebuilder:validation:Optional + Positive *bool `json:"positive,omitempty" tf:"positive,omitempty"` } - type ParametersInitParameters struct { + // Custom values parameter. Oneof: label_values, custom, text. + // Custom parameter + Custom []CustomInitParameters `json:"custom,omitempty" tf:"custom,omitempty"` -// Custom values parameter. Oneof: label_values, custom, text. -// Custom parameter -Custom []CustomInitParameters `json:"custom,omitempty" tf:"custom,omitempty"` - -// Parameter description. -// Parameter description -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Parameter description. + // Parameter description + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// UI-visibility. -// UI-visibility -Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` + // UI-visibility. + // UI-visibility + Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` -// Parameter identifier -// Parameter identifier -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Parameter identifier + // Parameter identifier + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Label values parameter. Oneof: label_values, custom, text. -// Label values parameter -LabelValues []LabelValuesInitParameters `json:"labelValues,omitempty" tf:"label_values,omitempty"` + // Label values parameter. Oneof: label_values, custom, text. + // Label values parameter + LabelValues []LabelValuesInitParameters `json:"labelValues,omitempty" tf:"label_values,omitempty"` -// Text parameter. Oneof: label_values, custom, text. -// Text parameter -Text []TextInitParameters `json:"text,omitempty" tf:"text,omitempty"` + // Text parameter. Oneof: label_values, custom, text. + // Text parameter + Text []TextInitParameters `json:"text,omitempty" tf:"text,omitempty"` -// UI-visible title of the parameter. -// UI-visible title of the parameter -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // UI-visible title of the parameter. 
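
NameHidingSettings above encodes a small whitelist/blacklist switch: with positive set to true the listed series names are the only ones rendered, with false they are the ones suppressed. A tiny sketch of that rule over a local mirror of the struct (the mirror keeps the example self-contained; the real type is NameHidingSettingsInitParameters):

package main

import "fmt"

func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

// nameHiding mirrors NameHidingSettingsInitParameters.
type nameHiding struct {
	Names    []*string
	Positive *bool
}

// visible applies the documented rule: Positive=true shows only the
// listed names, Positive=false hides exactly those names.
func visible(cfg nameHiding, series string) bool {
	listed := false
	for _, n := range cfg.Names {
		if n != nil && *n == series {
			listed = true
		}
	}
	if cfg.Positive != nil && *cfg.Positive {
		return listed
	}
	return !listed
}

func main() {
	cfg := nameHiding{Names: []*string{strPtr("latency_p99")}, Positive: boolPtr(true)}
	fmt.Println(visible(cfg, "latency_p99"), visible(cfg, "latency_p50")) // true false
}
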
+ // UI-visible title of the parameter + Title *string `json:"title,omitempty" tf:"title,omitempty"` } - type ParametersObservation struct { + // Custom values parameter. Oneof: label_values, custom, text. + // Custom parameter + Custom []CustomObservation `json:"custom,omitempty" tf:"custom,omitempty"` -// Custom values parameter. Oneof: label_values, custom, text. -// Custom parameter -Custom []CustomObservation `json:"custom,omitempty" tf:"custom,omitempty"` + // Parameter description. + // Parameter description + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Parameter description. -// Parameter description -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // UI-visibility. + // UI-visibility + Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` -// UI-visibility. -// UI-visibility -Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` + // Parameter identifier + // Parameter identifier + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Parameter identifier -// Parameter identifier -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Label values parameter. Oneof: label_values, custom, text. + // Label values parameter + LabelValues []LabelValuesObservation `json:"labelValues,omitempty" tf:"label_values,omitempty"` -// Label values parameter. Oneof: label_values, custom, text. -// Label values parameter -LabelValues []LabelValuesObservation `json:"labelValues,omitempty" tf:"label_values,omitempty"` + // Text parameter. Oneof: label_values, custom, text. + // Text parameter + Text []TextObservation `json:"text,omitempty" tf:"text,omitempty"` -// Text parameter. Oneof: label_values, custom, text. -// Text parameter -Text []TextObservation `json:"text,omitempty" tf:"text,omitempty"` - -// UI-visible title of the parameter. -// UI-visible title of the parameter -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // UI-visible title of the parameter. + // UI-visible title of the parameter + Title *string `json:"title,omitempty" tf:"title,omitempty"` } - type ParametersParameters struct { + // Custom values parameter. Oneof: label_values, custom, text. + // Custom parameter + // +kubebuilder:validation:Optional + Custom []CustomParameters `json:"custom,omitempty" tf:"custom,omitempty"` -// Custom values parameter. Oneof: label_values, custom, text. -// Custom parameter -// +kubebuilder:validation:Optional -Custom []CustomParameters `json:"custom,omitempty" tf:"custom,omitempty"` + // Parameter description. + // Parameter description + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Parameter description. -// Parameter description -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // UI-visibility. + // UI-visibility + // +kubebuilder:validation:Optional + Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` -// UI-visibility. -// UI-visibility -// +kubebuilder:validation:Optional -Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` + // Parameter identifier + // Parameter identifier + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` -// Parameter identifier -// Parameter identifier -// +kubebuilder:validation:Optional -ID *string `json:"id" tf:"id,omitempty"` + // Label values parameter. Oneof: label_values, custom, text. 
+ // Label values parameter + // +kubebuilder:validation:Optional + LabelValues []LabelValuesParameters `json:"labelValues,omitempty" tf:"label_values,omitempty"` -// Label values parameter. Oneof: label_values, custom, text. -// Label values parameter -// +kubebuilder:validation:Optional -LabelValues []LabelValuesParameters `json:"labelValues,omitempty" tf:"label_values,omitempty"` + // Text parameter. Oneof: label_values, custom, text. + // Text parameter + // +kubebuilder:validation:Optional + Text []TextParameters `json:"text,omitempty" tf:"text,omitempty"` -// Text parameter. Oneof: label_values, custom, text. -// Text parameter -// +kubebuilder:validation:Optional -Text []TextParameters `json:"text,omitempty" tf:"text,omitempty"` - -// UI-visible title of the parameter. -// UI-visible title of the parameter -// +kubebuilder:validation:Optional -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // UI-visible title of the parameter. + // UI-visible title of the parameter + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` } - type ParametrizationInitParameters struct { + // parameters list. + // Dashboard parameter + Parameters []ParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` -// parameters list. -// Dashboard parameter -Parameters []ParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` - -// dashboard predefined parameters selector. -// Predefined selectors -Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` + // dashboard predefined parameters selector. + // Predefined selectors + Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` } - type ParametrizationObservation struct { + // parameters list. + // Dashboard parameter + Parameters []ParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` -// parameters list. -// Dashboard parameter -Parameters []ParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` - -// dashboard predefined parameters selector. -// Predefined selectors -Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` + // dashboard predefined parameters selector. + // Predefined selectors + Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` } - type ParametrizationParameters struct { + // parameters list. + // Dashboard parameter + // +kubebuilder:validation:Optional + Parameters []ParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` -// parameters list. -// Dashboard parameter -// +kubebuilder:validation:Optional -Parameters []ParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` - -// dashboard predefined parameters selector. -// Predefined selectors -// +kubebuilder:validation:Optional -Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` + // dashboard predefined parameters selector. + // Predefined selectors + // +kubebuilder:validation:Optional + Selectors *string `json:"selectors,omitempty" tf:"selectors,omitempty"` } - type PositionInitParameters struct { + // Height. + // Required. Height + H *float64 `json:"h,omitempty" tf:"h,omitempty"` -// Height. -// Required. Height -H *float64 `json:"h,omitempty" tf:"h,omitempty"` + // Width. + // Required. Width + W *float64 `json:"w,omitempty" tf:"w,omitempty"` -// Width. -// Required. Weight -W *float64 `json:"w,omitempty" tf:"w,omitempty"` + // X-axis top-left corner coordinate. + // Required.
X-axis top-left corner coordinate + X *float64 `json:"x,omitempty" tf:"x,omitempty"` -// X-axis top-left corner coordinate. -// Required. X-axis top-left corner coordinate -X *float64 `json:"x,omitempty" tf:"x,omitempty"` - -// Y-axis top-left corner coordinate. -// Required. Y-axis top-left corner coordinate -Y *float64 `json:"y,omitempty" tf:"y,omitempty"` + // Y-axis top-left corner coordinate. + // Required. Y-axis top-left corner coordinate + Y *float64 `json:"y,omitempty" tf:"y,omitempty"` } - type PositionObservation struct { + // Height. + // Required. Height + H *float64 `json:"h,omitempty" tf:"h,omitempty"` -// Height. -// Required. Height -H *float64 `json:"h,omitempty" tf:"h,omitempty"` - -// Width. -// Required. Weight -W *float64 `json:"w,omitempty" tf:"w,omitempty"` + // Width. + // Required. Weight + W *float64 `json:"w,omitempty" tf:"w,omitempty"` -// X-axis top-left corner coordinate. -// Required. X-axis top-left corner coordinate -X *float64 `json:"x,omitempty" tf:"x,omitempty"` + // X-axis top-left corner coordinate. + // Required. X-axis top-left corner coordinate + X *float64 `json:"x,omitempty" tf:"x,omitempty"` -// Y-axis top-left corner coordinate. -// Required. Y-axis top-left corner coordinate -Y *float64 `json:"y,omitempty" tf:"y,omitempty"` + // Y-axis top-left corner coordinate. + // Required. Y-axis top-left corner coordinate + Y *float64 `json:"y,omitempty" tf:"y,omitempty"` } - type PositionParameters struct { + // Height. + // Required. Height + // +kubebuilder:validation:Optional + H *float64 `json:"h,omitempty" tf:"h,omitempty"` -// Height. -// Required. Height -// +kubebuilder:validation:Optional -H *float64 `json:"h,omitempty" tf:"h,omitempty"` - -// Width. -// Required. Weight -// +kubebuilder:validation:Optional -W *float64 `json:"w,omitempty" tf:"w,omitempty"` + // Width. + // Required. Weight + // +kubebuilder:validation:Optional + W *float64 `json:"w,omitempty" tf:"w,omitempty"` -// X-axis top-left corner coordinate. -// Required. X-axis top-left corner coordinate -// +kubebuilder:validation:Optional -X *float64 `json:"x,omitempty" tf:"x,omitempty"` + // X-axis top-left corner coordinate. + // Required. X-axis top-left corner coordinate + // +kubebuilder:validation:Optional + X *float64 `json:"x,omitempty" tf:"x,omitempty"` -// Y-axis top-left corner coordinate. -// Required. Y-axis top-left corner coordinate -// +kubebuilder:validation:Optional -Y *float64 `json:"y,omitempty" tf:"y,omitempty"` + // Y-axis top-left corner coordinate. + // Required. Y-axis top-left corner coordinate + // +kubebuilder:validation:Optional + Y *float64 `json:"y,omitempty" tf:"y,omitempty"` } - type QueriesInitParameters struct { + // Downsamplang settings. + // Downsampling settings + Downsampling []DownsamplingInitParameters `json:"downsampling,omitempty" tf:"downsampling,omitempty"` -// Downsamplang settings. -// Downsampling settings -Downsampling []DownsamplingInitParameters `json:"downsampling,omitempty" tf:"downsampling,omitempty"` - -// Query targets. -// Downsampling settings -Target []TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` + // Query targets. + // Downsampling settings + Target []TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` } - type QueriesObservation struct { + // Downsamplang settings. + // Downsampling settings + Downsampling []DownsamplingObservation `json:"downsampling,omitempty" tf:"downsampling,omitempty"` -// Downsamplang settings. 
-// Downsampling settings -Downsampling []DownsamplingObservation `json:"downsampling,omitempty" tf:"downsampling,omitempty"` - -// Query targets. -// Downsampling settings -Target []TargetObservation `json:"target,omitempty" tf:"target,omitempty"` + // Query targets. + // Downsampling settings + Target []TargetObservation `json:"target,omitempty" tf:"target,omitempty"` } - type QueriesParameters struct { + // Downsamplang settings. + // Downsampling settings + // +kubebuilder:validation:Optional + Downsampling []DownsamplingParameters `json:"downsampling,omitempty" tf:"downsampling,omitempty"` -// Downsamplang settings. -// Downsampling settings -// +kubebuilder:validation:Optional -Downsampling []DownsamplingParameters `json:"downsampling,omitempty" tf:"downsampling,omitempty"` - -// Query targets. -// Downsampling settings -// +kubebuilder:validation:Optional -Target []TargetParameters `json:"target,omitempty" tf:"target,omitempty"` + // Query targets. + // Downsampling settings + // +kubebuilder:validation:Optional + Target []TargetParameters `json:"target,omitempty" tf:"target,omitempty"` } - type RightInitParameters struct { + // Max value in extended number format or empty. + // Max value in extended number format or empty + Max *string `json:"max,omitempty" tf:"max,omitempty"` -// Max value in extended number format or empty. -// Max value in extended number format or empty -Max *string `json:"max,omitempty" tf:"max,omitempty"` - -// Min value in extended number format or empty. -// Min value in extended number format or empty -Min *string `json:"min,omitempty" tf:"min,omitempty"` + // Min value in extended number format or empty. + // Min value in extended number format or empty + Min *string `json:"min,omitempty" tf:"min,omitempty"` -// Tick value precision (null as default, 0-7 in other cases). -// Tick value precision (null as default, 0-7 in other cases) -Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` + // Tick value precision (null as default, 0-7 in other cases). + // Tick value precision (null as default, 0-7 in other cases) + Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` -// Title or empty. -// Title or empty -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Title or empty. + // Title or empty + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Type. Values: -// Type -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type. Values: + // Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Unit format. Values: -// Unit format -UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` + // Unit format. Values: + // Unit format + UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` } - type RightObservation struct { + // Max value in extended number format or empty. + // Max value in extended number format or empty + Max *string `json:"max,omitempty" tf:"max,omitempty"` -// Max value in extended number format or empty. -// Max value in extended number format or empty -Max *string `json:"max,omitempty" tf:"max,omitempty"` - -// Min value in extended number format or empty. -// Min value in extended number format or empty -Min *string `json:"min,omitempty" tf:"min,omitempty"` + // Min value in extended number format or empty. + // Min value in extended number format or empty + Min *string `json:"min,omitempty" tf:"min,omitempty"` -// Tick value precision (null as default, 0-7 in other cases). 
-// Tick value precision (null as default, 0-7 in other cases) -Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` + // Tick value precision (null as default, 0-7 in other cases). + // Tick value precision (null as default, 0-7 in other cases) + Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` -// Title or empty. -// Title or empty -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Title or empty. + // Title or empty + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Type. Values: -// Type -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type. Values: + // Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Unit format. Values: -// Unit format -UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` + // Unit format. Values: + // Unit format + UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` } - type RightParameters struct { + // Max value in extended number format or empty. + // Max value in extended number format or empty + // +kubebuilder:validation:Optional + Max *string `json:"max,omitempty" tf:"max,omitempty"` -// Max value in extended number format or empty. -// Max value in extended number format or empty -// +kubebuilder:validation:Optional -Max *string `json:"max,omitempty" tf:"max,omitempty"` + // Min value in extended number format or empty. + // Min value in extended number format or empty + // +kubebuilder:validation:Optional + Min *string `json:"min,omitempty" tf:"min,omitempty"` -// Min value in extended number format or empty. -// Min value in extended number format or empty -// +kubebuilder:validation:Optional -Min *string `json:"min,omitempty" tf:"min,omitempty"` + // Tick value precision (null as default, 0-7 in other cases). + // Tick value precision (null as default, 0-7 in other cases) + // +kubebuilder:validation:Optional + Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` -// Tick value precision (null as default, 0-7 in other cases). -// Tick value precision (null as default, 0-7 in other cases) -// +kubebuilder:validation:Optional -Precision *float64 `json:"precision,omitempty" tf:"precision,omitempty"` + // Title or empty. + // Title or empty + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Title or empty. -// Title or empty -// +kubebuilder:validation:Optional -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Type. Values: + // Type + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type. Values: -// Type -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Unit format. Values: -// Unit format -// +kubebuilder:validation:Optional -UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` + // Unit format. Values: + // Unit format + // +kubebuilder:validation:Optional + UnitFormat *string `json:"unitFormat,omitempty" tf:"unit_format,omitempty"` } - type SeriesOverridesInitParameters struct { + // Series name or empty. + // Series name + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Series name or empty. -// Series name -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Override settings. + // Override settings + Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` -// Override settings. 
-// Override settings -Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` - -// Series index. Oneof: name or target_index. -// Target index -TargetIndex *string `json:"targetIndex,omitempty" tf:"target_index,omitempty"` + // Series index. Oneof: name or target_index. + // Target index + TargetIndex *string `json:"targetIndex,omitempty" tf:"target_index,omitempty"` } - type SeriesOverridesObservation struct { + // Series name or empty. + // Series name + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Series name or empty. -// Series name -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// Override settings. -// Override settings -Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` + // Override settings. + // Override settings + Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` -// Series index. Oneof: name or target_index. -// Target index -TargetIndex *string `json:"targetIndex,omitempty" tf:"target_index,omitempty"` + // Series index. Oneof: name or target_index. + // Target index + TargetIndex *string `json:"targetIndex,omitempty" tf:"target_index,omitempty"` } - type SeriesOverridesParameters struct { + // Series name or empty. + // Series name + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Series name or empty. -// Series name -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// Override settings. -// Override settings -// +kubebuilder:validation:Optional -Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` + // Override settings. + // Override settings + // +kubebuilder:validation:Optional + Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` -// Series index. Oneof: name or target_index. -// Target index -// +kubebuilder:validation:Optional -TargetIndex *string `json:"targetIndex,omitempty" tf:"target_index,omitempty"` + // Series index. Oneof: name or target_index. + // Target index + // +kubebuilder:validation:Optional + TargetIndex *string `json:"targetIndex,omitempty" tf:"target_index,omitempty"` } - type SettingsInitParameters struct { + // Series color or empty. + // Series color or empty + Color *string `json:"color,omitempty" tf:"color,omitempty"` -// Series color or empty. -// Series color or empty -Color *string `json:"color,omitempty" tf:"color,omitempty"` - -// Stack grow down. -// Stack grow down -GrowDown *bool `json:"growDown,omitempty" tf:"grow_down,omitempty"` + // Stack grow down. + // Stack grow down + GrowDown *bool `json:"growDown,omitempty" tf:"grow_down,omitempty"` -// Series name or empty. -// Series name or empty -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Series name or empty. + // Series name or empty + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Stack name or empty. -// Stack name or empty -StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` + // Stack name or empty. + // Stack name or empty + StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` -// Type. Values: -// Type -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type. Values: + // Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Yaxis position. -// Yaxis position -YaxisPosition *string `json:"yaxisPosition,omitempty" tf:"yaxis_position,omitempty"` + // Yaxis position. 
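+ //
+ // Editor's note, not generated code: an override selects a series either
+ // by Name or by TargetIndex (one of the two) and applies these settings.
+ // A minimal sketch, using hypothetical values:
+ //
+ //     idx := "0"
+ //     color := "#ff0000"
+ //     o := SeriesOverridesInitParameters{
+ //         TargetIndex: &idx,
+ //         Settings:    []SettingsInitParameters{{Color: &color}},
+ //     }
+ //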
+ // Yaxis position + YaxisPosition *string `json:"yaxisPosition,omitempty" tf:"yaxis_position,omitempty"` } - type SettingsObservation struct { + // Series color or empty. + // Series color or empty + Color *string `json:"color,omitempty" tf:"color,omitempty"` -// Series color or empty. -// Series color or empty -Color *string `json:"color,omitempty" tf:"color,omitempty"` + // Stack grow down. + // Stack grow down + GrowDown *bool `json:"growDown,omitempty" tf:"grow_down,omitempty"` -// Stack grow down. -// Stack grow down -GrowDown *bool `json:"growDown,omitempty" tf:"grow_down,omitempty"` + // Series name or empty. + // Series name or empty + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Series name or empty. -// Series name or empty -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Stack name or empty. + // Stack name or empty + StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` -// Stack name or empty. -// Stack name or empty -StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` + // Type. Values: + // Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type. Values: -// Type -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Yaxis position. -// Yaxis position -YaxisPosition *string `json:"yaxisPosition,omitempty" tf:"yaxis_position,omitempty"` + // Yaxis position. + // Yaxis position + YaxisPosition *string `json:"yaxisPosition,omitempty" tf:"yaxis_position,omitempty"` } - type SettingsParameters struct { + // Series color or empty. + // Series color or empty + // +kubebuilder:validation:Optional + Color *string `json:"color,omitempty" tf:"color,omitempty"` -// Series color or empty. -// Series color or empty -// +kubebuilder:validation:Optional -Color *string `json:"color,omitempty" tf:"color,omitempty"` + // Stack grow down. + // Stack grow down + // +kubebuilder:validation:Optional + GrowDown *bool `json:"growDown,omitempty" tf:"grow_down,omitempty"` -// Stack grow down. -// Stack grow down -// +kubebuilder:validation:Optional -GrowDown *bool `json:"growDown,omitempty" tf:"grow_down,omitempty"` + // Series name or empty. + // Series name or empty + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Series name or empty. -// Series name or empty -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Stack name or empty. + // Stack name or empty + // +kubebuilder:validation:Optional + StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` -// Stack name or empty. -// Stack name or empty -// +kubebuilder:validation:Optional -StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` + // Type. Values: + // Type + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type. Values: -// Type -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Yaxis position. -// Yaxis position -// +kubebuilder:validation:Optional -YaxisPosition *string `json:"yaxisPosition,omitempty" tf:"yaxis_position,omitempty"` + // Yaxis position. + // Yaxis position + // +kubebuilder:validation:Optional + YaxisPosition *string `json:"yaxisPosition,omitempty" tf:"yaxis_position,omitempty"` } - type StandardInitParameters struct { - } - type StandardObservation struct { - } - type StandardParameters struct { - } - type TargetInitParameters struct { + // Checks that target is visible or invisible. 
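+ //
+ // Editor's note, not generated code: a target couples one query string
+ // with its display toggles. A minimal sketch, using a hypothetical query:
+ //
+ //     q := "cpu_usage"
+ //     hidden := false
+ //     t := TargetInitParameters{Query: &q, Hidden: &hidden}
+ //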
+ // Checks that target is visible or invisible + Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` -// Checks that target is visible or invisible. -// Checks that target is visible or invisible -Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` + // Query. + // Required. Query + Query *string `json:"query,omitempty" tf:"query,omitempty"` -// Query. -// Required. Query -Query *string `json:"query,omitempty" tf:"query,omitempty"` - -// Text mode enabled. -// Text mode -TextMode *bool `json:"textMode,omitempty" tf:"text_mode,omitempty"` + // Text mode enabled. + // Text mode + TextMode *bool `json:"textMode,omitempty" tf:"text_mode,omitempty"` } - type TargetObservation struct { + // Checks that target is visible or invisible. + // Checks that target is visible or invisible + Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` -// Checks that target is visible or invisible. -// Checks that target is visible or invisible -Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` + // Query. + // Required. Query + Query *string `json:"query,omitempty" tf:"query,omitempty"` -// Query. -// Required. Query -Query *string `json:"query,omitempty" tf:"query,omitempty"` - -// Text mode enabled. -// Text mode -TextMode *bool `json:"textMode,omitempty" tf:"text_mode,omitempty"` + // Text mode enabled. + // Text mode + TextMode *bool `json:"textMode,omitempty" tf:"text_mode,omitempty"` } - type TargetParameters struct { + // Checks that target is visible or invisible. + // Checks that target is visible or invisible + // +kubebuilder:validation:Optional + Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` -// Checks that target is visible or invisible. -// Checks that target is visible or invisible -// +kubebuilder:validation:Optional -Hidden *bool `json:"hidden,omitempty" tf:"hidden,omitempty"` - -// Query. -// Required. Query -// +kubebuilder:validation:Optional -Query *string `json:"query,omitempty" tf:"query,omitempty"` + // Query. + // Required. Query + // +kubebuilder:validation:Optional + Query *string `json:"query,omitempty" tf:"query,omitempty"` -// Text mode enabled. -// Text mode -// +kubebuilder:validation:Optional -TextMode *bool `json:"textMode,omitempty" tf:"text_mode,omitempty"` + // Text mode enabled. + // Text mode + // +kubebuilder:validation:Optional + TextMode *bool `json:"textMode,omitempty" tf:"text_mode,omitempty"` } - type TextInitParameters struct { - -// Default value. -// Default value -DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + // Default value. + // Default value + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` } - type TextObservation struct { - -// Default value. -// Default value -DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + // Default value. + // Default value + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` } - type TextParameters struct { - -// Default value. -// Default value -// +kubebuilder:validation:Optional -DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + // Default value. + // Default value + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` } - type TitleInitParameters struct { + // Title size. Values: + // Title size + Size *string `json:"size,omitempty" tf:"size,omitempty"` -// Title size. 
Values: -// Title size -Size *string `json:"size,omitempty" tf:"size,omitempty"` - -// Text widget settings. Oneof: text, title or chart. -// Title text -Text *string `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Title text + Text *string `json:"text,omitempty" tf:"text,omitempty"` } - type TitleObservation struct { + // Title size. Values: + // Title size + Size *string `json:"size,omitempty" tf:"size,omitempty"` -// Title size. Values: -// Title size -Size *string `json:"size,omitempty" tf:"size,omitempty"` - -// Text widget settings. Oneof: text, title or chart. -// Title text -Text *string `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Title text + Text *string `json:"text,omitempty" tf:"text,omitempty"` } - type TitleParameters struct { + // Title size. Values: + // Title size + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` -// Title size. Values: -// Title size -// +kubebuilder:validation:Optional -Size *string `json:"size,omitempty" tf:"size,omitempty"` - -// Text widget settings. Oneof: text, title or chart. -// Title text -// +kubebuilder:validation:Optional -Text *string `json:"text" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Title text + // +kubebuilder:validation:Optional + Text *string `json:"text" tf:"text,omitempty"` } - type VisualizationSettingsInitParameters struct { + // Aggregation. Values: + // Aggregation + Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` -// Aggregation. Values: -// Aggregation -Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` - -// Color settings. -// Color scheme settings -ColorSchemeSettings []ColorSchemeSettingsInitParameters `json:"colorSchemeSettings,omitempty" tf:"color_scheme_settings,omitempty"` + // Color settings. + // Color scheme settings + ColorSchemeSettings []ColorSchemeSettingsInitParameters `json:"colorSchemeSettings,omitempty" tf:"color_scheme_settings,omitempty"` -// Heatmap settings. -// Heatmap settings -HeatmapSettings []HeatmapSettingsInitParameters `json:"heatmapSettings,omitempty" tf:"heatmap_settings,omitempty"` + // Heatmap settings. + // Heatmap settings + HeatmapSettings []HeatmapSettingsInitParameters `json:"heatmapSettings,omitempty" tf:"heatmap_settings,omitempty"` -// Interpolate values. Values: -// Interpolate -Interpolate *string `json:"interpolate,omitempty" tf:"interpolate,omitempty"` + // Interpolate values. Values: + // Interpolate + Interpolate *string `json:"interpolate,omitempty" tf:"interpolate,omitempty"` -// Normalize values. -// Normalize -Normalize *bool `json:"normalize,omitempty" tf:"normalize,omitempty"` + // Normalize values. + // Normalize + Normalize *bool `json:"normalize,omitempty" tf:"normalize,omitempty"` -// Show chart labels. -// Show chart labels -ShowLabels *bool `json:"showLabels,omitempty" tf:"show_labels,omitempty"` + // Show chart labels. + // Show chart labels + ShowLabels *bool `json:"showLabels,omitempty" tf:"show_labels,omitempty"` -// Title or empty. -// Inside chart title -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Title or empty. + // Inside chart title + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Type. Values: -// Visualization type -Type *string `json:"type,omitempty" tf:"type,omitempty"` + // Type. 
Values: + // Visualization type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Y axis settings. -// Y axis settings -YaxisSettings []YaxisSettingsInitParameters `json:"yaxisSettings,omitempty" tf:"yaxis_settings,omitempty"` + // Y axis settings. + // Y axis settings + YaxisSettings []YaxisSettingsInitParameters `json:"yaxisSettings,omitempty" tf:"yaxis_settings,omitempty"` } - type VisualizationSettingsObservation struct { + // Aggregation. Values: + // Aggregation + Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` -// Aggregation. Values: -// Aggregation -Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` + // Color settings. + // Color scheme settings + ColorSchemeSettings []ColorSchemeSettingsObservation `json:"colorSchemeSettings,omitempty" tf:"color_scheme_settings,omitempty"` -// Color settings. -// Color scheme settings -ColorSchemeSettings []ColorSchemeSettingsObservation `json:"colorSchemeSettings,omitempty" tf:"color_scheme_settings,omitempty"` + // Heatmap settings. + // Heatmap settings + HeatmapSettings []HeatmapSettingsObservation `json:"heatmapSettings,omitempty" tf:"heatmap_settings,omitempty"` -// Heatmap settings. -// Heatmap settings -HeatmapSettings []HeatmapSettingsObservation `json:"heatmapSettings,omitempty" tf:"heatmap_settings,omitempty"` + // Interpolate values. Values: + // Interpolate + Interpolate *string `json:"interpolate,omitempty" tf:"interpolate,omitempty"` -// Interpolate values. Values: -// Interpolate -Interpolate *string `json:"interpolate,omitempty" tf:"interpolate,omitempty"` + // Normalize values. + // Normalize + Normalize *bool `json:"normalize,omitempty" tf:"normalize,omitempty"` -// Normalize values. -// Normalize -Normalize *bool `json:"normalize,omitempty" tf:"normalize,omitempty"` + // Show chart labels. + // Show chart labels + ShowLabels *bool `json:"showLabels,omitempty" tf:"show_labels,omitempty"` -// Show chart labels. -// Show chart labels -ShowLabels *bool `json:"showLabels,omitempty" tf:"show_labels,omitempty"` + // Title or empty. + // Inside chart title + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Title or empty. -// Inside chart title -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Type. Values: + // Visualization type + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type. Values: -// Visualization type -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Y axis settings. -// Y axis settings -YaxisSettings []YaxisSettingsObservation `json:"yaxisSettings,omitempty" tf:"yaxis_settings,omitempty"` + // Y axis settings. + // Y axis settings + YaxisSettings []YaxisSettingsObservation `json:"yaxisSettings,omitempty" tf:"yaxis_settings,omitempty"` } - type VisualizationSettingsParameters struct { + // Aggregation. Values: + // Aggregation + // +kubebuilder:validation:Optional + Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` -// Aggregation. Values: -// Aggregation -// +kubebuilder:validation:Optional -Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` + // Color settings. + // Color scheme settings + // +kubebuilder:validation:Optional + ColorSchemeSettings []ColorSchemeSettingsParameters `json:"colorSchemeSettings,omitempty" tf:"color_scheme_settings,omitempty"` -// Color settings. 
-// Color scheme settings -// +kubebuilder:validation:Optional -ColorSchemeSettings []ColorSchemeSettingsParameters `json:"colorSchemeSettings,omitempty" tf:"color_scheme_settings,omitempty"` + // Heatmap settings. + // Heatmap settings + // +kubebuilder:validation:Optional + HeatmapSettings []HeatmapSettingsParameters `json:"heatmapSettings,omitempty" tf:"heatmap_settings,omitempty"` -// Heatmap settings. -// Heatmap settings -// +kubebuilder:validation:Optional -HeatmapSettings []HeatmapSettingsParameters `json:"heatmapSettings,omitempty" tf:"heatmap_settings,omitempty"` + // Interpolate values. Values: + // Interpolate + // +kubebuilder:validation:Optional + Interpolate *string `json:"interpolate,omitempty" tf:"interpolate,omitempty"` -// Interpolate values. Values: -// Interpolate -// +kubebuilder:validation:Optional -Interpolate *string `json:"interpolate,omitempty" tf:"interpolate,omitempty"` + // Normalize values. + // Normalize + // +kubebuilder:validation:Optional + Normalize *bool `json:"normalize,omitempty" tf:"normalize,omitempty"` -// Normalize values. -// Normalize -// +kubebuilder:validation:Optional -Normalize *bool `json:"normalize,omitempty" tf:"normalize,omitempty"` + // Show chart labels. + // Show chart labels + // +kubebuilder:validation:Optional + ShowLabels *bool `json:"showLabels,omitempty" tf:"show_labels,omitempty"` -// Show chart labels. -// Show chart labels -// +kubebuilder:validation:Optional -ShowLabels *bool `json:"showLabels,omitempty" tf:"show_labels,omitempty"` + // Title or empty. + // Inside chart title + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` -// Title or empty. -// Inside chart title -// +kubebuilder:validation:Optional -Title *string `json:"title,omitempty" tf:"title,omitempty"` + // Type. Values: + // Visualization type + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` -// Type. Values: -// Visualization type -// +kubebuilder:validation:Optional -Type *string `json:"type,omitempty" tf:"type,omitempty"` - -// Y axis settings. -// Y axis settings -// +kubebuilder:validation:Optional -YaxisSettings []YaxisSettingsParameters `json:"yaxisSettings,omitempty" tf:"yaxis_settings,omitempty"` + // Y axis settings. + // Y axis settings + // +kubebuilder:validation:Optional + YaxisSettings []YaxisSettingsParameters `json:"yaxisSettings,omitempty" tf:"yaxis_settings,omitempty"` } - type WidgetsInitParameters struct { + // Chart widget settings. Oneof: text, title or chart. + // Chart widget + Chart []ChartInitParameters `json:"chart,omitempty" tf:"chart,omitempty"` -// Chart widget settings. Oneof: text, title or chart. -// Chart widget -Chart []ChartInitParameters `json:"chart,omitempty" tf:"chart,omitempty"` - -// Widget position. -// Required. Widget layout position -Position []PositionInitParameters `json:"position,omitempty" tf:"position,omitempty"` + // Widget position. + // Required. Widget layout position + Position []PositionInitParameters `json:"position,omitempty" tf:"position,omitempty"` -// Text widget settings. Oneof: text, title or chart. -// Text widget -Text []WidgetsTextInitParameters `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Text widget + Text []WidgetsTextInitParameters `json:"text,omitempty" tf:"text,omitempty"` -// Title widget settings. Oneof: text, title or chart. 
-// Title widget -Title []TitleInitParameters `json:"title,omitempty" tf:"title,omitempty"` + // Title widget settings. Oneof: text, title or chart. + // Title widget + Title []TitleInitParameters `json:"title,omitempty" tf:"title,omitempty"` } - type WidgetsObservation struct { + // Chart widget settings. Oneof: text, title or chart. + // Chart widget + Chart []ChartObservation `json:"chart,omitempty" tf:"chart,omitempty"` -// Chart widget settings. Oneof: text, title or chart. -// Chart widget -Chart []ChartObservation `json:"chart,omitempty" tf:"chart,omitempty"` - -// Widget position. -// Required. Widget layout position -Position []PositionObservation `json:"position,omitempty" tf:"position,omitempty"` + // Widget position. + // Required. Widget layout position + Position []PositionObservation `json:"position,omitempty" tf:"position,omitempty"` -// Text widget settings. Oneof: text, title or chart. -// Text widget -Text []WidgetsTextObservation `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Text widget + Text []WidgetsTextObservation `json:"text,omitempty" tf:"text,omitempty"` -// Title widget settings. Oneof: text, title or chart. -// Title widget -Title []TitleObservation `json:"title,omitempty" tf:"title,omitempty"` + // Title widget settings. Oneof: text, title or chart. + // Title widget + Title []TitleObservation `json:"title,omitempty" tf:"title,omitempty"` } - type WidgetsParameters struct { + // Chart widget settings. Oneof: text, title or chart. + // Chart widget + // +kubebuilder:validation:Optional + Chart []ChartParameters `json:"chart,omitempty" tf:"chart,omitempty"` -// Chart widget settings. Oneof: text, title or chart. -// Chart widget -// +kubebuilder:validation:Optional -Chart []ChartParameters `json:"chart,omitempty" tf:"chart,omitempty"` - -// Widget position. -// Required. Widget layout position -// +kubebuilder:validation:Optional -Position []PositionParameters `json:"position,omitempty" tf:"position,omitempty"` + // Widget position. + // Required. Widget layout position + // +kubebuilder:validation:Optional + Position []PositionParameters `json:"position,omitempty" tf:"position,omitempty"` -// Text widget settings. Oneof: text, title or chart. -// Text widget -// +kubebuilder:validation:Optional -Text []WidgetsTextParameters `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Text widget + // +kubebuilder:validation:Optional + Text []WidgetsTextParameters `json:"text,omitempty" tf:"text,omitempty"` -// Title widget settings. Oneof: text, title or chart. -// Title widget -// +kubebuilder:validation:Optional -Title []TitleParameters `json:"title,omitempty" tf:"title,omitempty"` + // Title widget settings. Oneof: text, title or chart. + // Title widget + // +kubebuilder:validation:Optional + Title []TitleParameters `json:"title,omitempty" tf:"title,omitempty"` } - type WidgetsTextInitParameters struct { - -// Text widget settings. Oneof: text, title or chart. -// Text -Text *string `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. + // Text + Text *string `json:"text,omitempty" tf:"text,omitempty"` } - type WidgetsTextObservation struct { - -// Text widget settings. Oneof: text, title or chart. -// Text -Text *string `json:"text,omitempty" tf:"text,omitempty"` + // Text widget settings. Oneof: text, title or chart. 
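+ //
+ // Editor's note, not generated code: a widget pairs a layout position
+ // with exactly one of chart, text or title. A minimal sketch, using
+ // hypothetical values:
+ //
+ //     x, y, w, h := 0.0, 0.0, 12.0, 4.0
+ //     msg := "Overview"
+ //     widget := WidgetsInitParameters{
+ //         Position: []PositionInitParameters{{X: &x, Y: &y, W: &w, H: &h}},
+ //         Text:     []WidgetsTextInitParameters{{Text: &msg}},
+ //     }
+ //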
+ // Text
+ Text *string `json:"text,omitempty" tf:"text,omitempty"`
}
-
type WidgetsTextParameters struct {
-
-// Text widget settings. Oneof: text, title or chart.
-// Text
-// +kubebuilder:validation:Optional
-Text *string `json:"text,omitempty" tf:"text,omitempty"`
+ // Text widget settings. Oneof: text, title or chart.
+ // Text
+ // +kubebuilder:validation:Optional
+ Text *string `json:"text,omitempty" tf:"text,omitempty"`
}
-
type YaxisSettingsInitParameters struct {
+ // Left yaxis config.
+ // Left Y axis settings
+ Left []LeftInitParameters `json:"left,omitempty" tf:"left,omitempty"`
-// Left yaxis config.
-// Left Y axis settings
-Left []LeftInitParameters `json:"left,omitempty" tf:"left,omitempty"`
-
-// Right yaxis config.
-// Right Y axis settings
-Right []RightInitParameters `json:"right,omitempty" tf:"right,omitempty"`
+ // Right yaxis config.
+ // Right Y axis settings
+ Right []RightInitParameters `json:"right,omitempty" tf:"right,omitempty"`
}
-
type YaxisSettingsObservation struct {
+ // Left yaxis config.
+ // Left Y axis settings
+ Left []LeftObservation `json:"left,omitempty" tf:"left,omitempty"`
-// Left yaxis config.
-// Left Y axis settings
-Left []LeftObservation `json:"left,omitempty" tf:"left,omitempty"`
-
-// Right yaxis config.
-// Right Y axis settings
-Right []RightObservation `json:"right,omitempty" tf:"right,omitempty"`
+ // Right yaxis config.
+ // Right Y axis settings
+ Right []RightObservation `json:"right,omitempty" tf:"right,omitempty"`
}
-
type YaxisSettingsParameters struct {
+ // Left yaxis config.
+ // Left Y axis settings
+ // +kubebuilder:validation:Optional
+ Left []LeftParameters `json:"left,omitempty" tf:"left,omitempty"`
-// Left yaxis config.
-// Left Y axis settings
-// +kubebuilder:validation:Optional
-Left []LeftParameters `json:"left,omitempty" tf:"left,omitempty"`
-
-// Right yaxis config.
-// Right Y axis settings
-// +kubebuilder:validation:Optional
-Right []RightParameters `json:"right,omitempty" tf:"right,omitempty"`
+ // Right yaxis config.
+ // Right Y axis settings
+ // +kubebuilder:validation:Optional
+ Right []RightParameters `json:"right,omitempty" tf:"right,omitempty"`
}

// DashboardSpec defines the desired state of Dashboard
type DashboardSpec struct {
v1.ResourceSpec `json:",inline"`
- ForProvider DashboardParameters `json:"forProvider"`
+ ForProvider DashboardParameters `json:"forProvider"`
// THIS IS A BETA FIELD. It will be honored
// unless the Management Policies feature flag is disabled.
// InitProvider holds the same fields as ForProvider, with the exception
@@ -1757,20 +1596,19 @@ type DashboardSpec struct {
// required on creation, but we do not desire to update them after creation,
// for example because an external controller is managing them, like an
// autoscaler.
- InitProvider DashboardInitParameters `json:"initProvider,omitempty"`
+ InitProvider DashboardInitParameters `json:"initProvider,omitempty"`
}

// DashboardStatus defines the observed state of Dashboard.
type DashboardStatus struct {
v1.ResourceStatus `json:",inline"`
- AtProvider DashboardObservation `json:"atProvider,omitempty"`
+ AtProvider DashboardObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
-
// Dashboard is the Schema for the Dashboards API. Allows management of a Yandex.Cloud Monitoring Dashboard.
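//
// Editor's note, not generated code: a minimal in-Go construction of the
// desired state, with hypothetical values, might look like:
//
//     name := "my-dashboard"
//     spec := DashboardSpec{ForProvider: DashboardParameters{Name: &name}}
//
// The CEL rule on the Dashboard type below enforces exactly this shape:
// name must be present in forProvider or initProvider whenever the
// management policies allow the resource to be created or updated.
//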
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -1780,9 +1618,9 @@ type DashboardStatus struct { type Dashboard struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec DashboardSpec `json:"spec"` - Status DashboardStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec DashboardSpec `json:"spec"` + Status DashboardStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/monitoring/v1alpha1/zz_generated.conversion_hubs.go b/apis/monitoring/v1alpha1/zz_generated.conversion_hubs.go index 6bfdb7d..842ae1e 100755 --- a/apis/monitoring/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/monitoring/v1alpha1/zz_generated.conversion_hubs.go @@ -1,10 +1,6 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 - - // Hub marks this type as a conversion hub. - func (tr *Dashboard) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *Dashboard) Hub() {} diff --git a/apis/monitoring/v1alpha1/zz_generated.deepcopy.go b/apis/monitoring/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..38cf1b3 --- /dev/null +++ b/apis/monitoring/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3067 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticInitParameters) DeepCopyInto(out *AutomaticInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInitParameters. +func (in *AutomaticInitParameters) DeepCopy() *AutomaticInitParameters { + if in == nil { + return nil + } + out := new(AutomaticInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticObservation) DeepCopyInto(out *AutomaticObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticObservation. +func (in *AutomaticObservation) DeepCopy() *AutomaticObservation { + if in == nil { + return nil + } + out := new(AutomaticObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticParameters) DeepCopyInto(out *AutomaticParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticParameters. 
+func (in *AutomaticParameters) DeepCopy() *AutomaticParameters { + if in == nil { + return nil + } + out := new(AutomaticParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChartInitParameters) DeepCopyInto(out *ChartInitParameters) { + *out = *in + if in.ChartID != nil { + in, out := &in.ChartID, &out.ChartID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayLegend != nil { + in, out := &in.DisplayLegend, &out.DisplayLegend + *out = new(bool) + **out = **in + } + if in.Freeze != nil { + in, out := &in.Freeze, &out.Freeze + *out = new(string) + **out = **in + } + if in.NameHidingSettings != nil { + in, out := &in.NameHidingSettings, &out.NameHidingSettings + *out = make([]NameHidingSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]QueriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeriesOverrides != nil { + in, out := &in.SeriesOverrides, &out.SeriesOverrides + *out = make([]SeriesOverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.VisualizationSettings != nil { + in, out := &in.VisualizationSettings, &out.VisualizationSettings + *out = make([]VisualizationSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartInitParameters. +func (in *ChartInitParameters) DeepCopy() *ChartInitParameters { + if in == nil { + return nil + } + out := new(ChartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChartObservation) DeepCopyInto(out *ChartObservation) { + *out = *in + if in.ChartID != nil { + in, out := &in.ChartID, &out.ChartID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayLegend != nil { + in, out := &in.DisplayLegend, &out.DisplayLegend + *out = new(bool) + **out = **in + } + if in.Freeze != nil { + in, out := &in.Freeze, &out.Freeze + *out = new(string) + **out = **in + } + if in.NameHidingSettings != nil { + in, out := &in.NameHidingSettings, &out.NameHidingSettings + *out = make([]NameHidingSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]QueriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeriesOverrides != nil { + in, out := &in.SeriesOverrides, &out.SeriesOverrides + *out = make([]SeriesOverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.VisualizationSettings != nil { + in, out := &in.VisualizationSettings, &out.VisualizationSettings + *out = make([]VisualizationSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartObservation. +func (in *ChartObservation) DeepCopy() *ChartObservation { + if in == nil { + return nil + } + out := new(ChartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChartParameters) DeepCopyInto(out *ChartParameters) { + *out = *in + if in.ChartID != nil { + in, out := &in.ChartID, &out.ChartID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayLegend != nil { + in, out := &in.DisplayLegend, &out.DisplayLegend + *out = new(bool) + **out = **in + } + if in.Freeze != nil { + in, out := &in.Freeze, &out.Freeze + *out = new(string) + **out = **in + } + if in.NameHidingSettings != nil { + in, out := &in.NameHidingSettings, &out.NameHidingSettings + *out = make([]NameHidingSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]QueriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeriesOverrides != nil { + in, out := &in.SeriesOverrides, &out.SeriesOverrides + *out = make([]SeriesOverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.VisualizationSettings != nil { + in, out := &in.VisualizationSettings, &out.VisualizationSettings + *out = make([]VisualizationSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartParameters. 
+func (in *ChartParameters) DeepCopy() *ChartParameters { + if in == nil { + return nil + } + out := new(ChartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColorSchemeSettingsInitParameters) DeepCopyInto(out *ColorSchemeSettingsInitParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = make([]AutomaticInitParameters, len(*in)) + copy(*out, *in) + } + if in.Gradient != nil { + in, out := &in.Gradient, &out.Gradient + *out = make([]GradientInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]StandardInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSchemeSettingsInitParameters. +func (in *ColorSchemeSettingsInitParameters) DeepCopy() *ColorSchemeSettingsInitParameters { + if in == nil { + return nil + } + out := new(ColorSchemeSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColorSchemeSettingsObservation) DeepCopyInto(out *ColorSchemeSettingsObservation) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = make([]AutomaticParameters, len(*in)) + copy(*out, *in) + } + if in.Gradient != nil { + in, out := &in.Gradient, &out.Gradient + *out = make([]GradientObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]StandardParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSchemeSettingsObservation. +func (in *ColorSchemeSettingsObservation) DeepCopy() *ColorSchemeSettingsObservation { + if in == nil { + return nil + } + out := new(ColorSchemeSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColorSchemeSettingsParameters) DeepCopyInto(out *ColorSchemeSettingsParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = make([]AutomaticParameters, len(*in)) + copy(*out, *in) + } + if in.Gradient != nil { + in, out := &in.Gradient, &out.Gradient + *out = make([]GradientParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]StandardParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSchemeSettingsParameters. +func (in *ColorSchemeSettingsParameters) DeepCopy() *ColorSchemeSettingsParameters { + if in == nil { + return nil + } + out := new(ColorSchemeSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomInitParameters) DeepCopyInto(out *CustomInitParameters) { + *out = *in + if in.DefaultValues != nil { + in, out := &in.DefaultValues, &out.DefaultValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Multiselectable != nil { + in, out := &in.Multiselectable, &out.Multiselectable + *out = new(bool) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomInitParameters. +func (in *CustomInitParameters) DeepCopy() *CustomInitParameters { + if in == nil { + return nil + } + out := new(CustomInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomObservation) DeepCopyInto(out *CustomObservation) { + *out = *in + if in.DefaultValues != nil { + in, out := &in.DefaultValues, &out.DefaultValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Multiselectable != nil { + in, out := &in.Multiselectable, &out.Multiselectable + *out = new(bool) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomObservation. +func (in *CustomObservation) DeepCopy() *CustomObservation { + if in == nil { + return nil + } + out := new(CustomObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomParameters) DeepCopyInto(out *CustomParameters) { + *out = *in + if in.DefaultValues != nil { + in, out := &in.DefaultValues, &out.DefaultValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Multiselectable != nil { + in, out := &in.Multiselectable, &out.Multiselectable + *out = new(bool) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomParameters. +func (in *CustomParameters) DeepCopy() *CustomParameters { + if in == nil { + return nil + } + out := new(CustomParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Dashboard) DeepCopyInto(out *Dashboard) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dashboard. +func (in *Dashboard) DeepCopy() *Dashboard { + if in == nil { + return nil + } + out := new(Dashboard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Dashboard) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardInitParameters) DeepCopyInto(out *DashboardInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parametrization != nil { + in, out := &in.Parametrization, &out.Parametrization + *out = make([]ParametrizationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Widgets != nil { + in, out := &in.Widgets, &out.Widgets + *out = make([]WidgetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardInitParameters. +func (in *DashboardInitParameters) DeepCopy() *DashboardInitParameters { + if in == nil { + return nil + } + out := new(DashboardInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardList) DeepCopyInto(out *DashboardList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Dashboard, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardList. +func (in *DashboardList) DeepCopy() *DashboardList { + if in == nil { + return nil + } + out := new(DashboardList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
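+//
+// Editor's note, not generated code: DeepCopyObject is what satisfies the
+// runtime.Object interface from k8s.io/apimachinery, so generic clients,
+// informers and caches can copy resources without knowing the concrete
+// type. A minimal sketch, assuming an existing *Dashboard value d:
+//
+//     var obj runtime.Object = d.DeepCopyObject()
+//     clone := obj.(*Dashboard) // independent copy; mutating it leaves d untouched
+//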
+func (in *DashboardList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardObservation) DeepCopyInto(out *DashboardObservation) { + *out = *in + if in.DashboardID != nil { + in, out := &in.DashboardID, &out.DashboardID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parametrization != nil { + in, out := &in.Parametrization, &out.Parametrization + *out = make([]ParametrizationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Widgets != nil { + in, out := &in.Widgets, &out.Widgets + *out = make([]WidgetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardObservation. +func (in *DashboardObservation) DeepCopy() *DashboardObservation { + if in == nil { + return nil + } + out := new(DashboardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DashboardParameters) DeepCopyInto(out *DashboardParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parametrization != nil { + in, out := &in.Parametrization, &out.Parametrization + *out = make([]ParametrizationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Widgets != nil { + in, out := &in.Widgets, &out.Widgets + *out = make([]WidgetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardParameters. +func (in *DashboardParameters) DeepCopy() *DashboardParameters { + if in == nil { + return nil + } + out := new(DashboardParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSpec. +func (in *DashboardSpec) DeepCopy() *DashboardSpec { + if in == nil { + return nil + } + out := new(DashboardSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardStatus) DeepCopyInto(out *DashboardStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardStatus. +func (in *DashboardStatus) DeepCopy() *DashboardStatus { + if in == nil { + return nil + } + out := new(DashboardStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DownsamplingInitParameters) DeepCopyInto(out *DownsamplingInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.GapFilling != nil { + in, out := &in.GapFilling, &out.GapFilling + *out = new(string) + **out = **in + } + if in.GridAggregation != nil { + in, out := &in.GridAggregation, &out.GridAggregation + *out = new(string) + **out = **in + } + if in.GridInterval != nil { + in, out := &in.GridInterval, &out.GridInterval + *out = new(float64) + **out = **in + } + if in.MaxPoints != nil { + in, out := &in.MaxPoints, &out.MaxPoints + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownsamplingInitParameters. +func (in *DownsamplingInitParameters) DeepCopy() *DownsamplingInitParameters { + if in == nil { + return nil + } + out := new(DownsamplingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownsamplingObservation) DeepCopyInto(out *DownsamplingObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.GapFilling != nil { + in, out := &in.GapFilling, &out.GapFilling + *out = new(string) + **out = **in + } + if in.GridAggregation != nil { + in, out := &in.GridAggregation, &out.GridAggregation + *out = new(string) + **out = **in + } + if in.GridInterval != nil { + in, out := &in.GridInterval, &out.GridInterval + *out = new(float64) + **out = **in + } + if in.MaxPoints != nil { + in, out := &in.MaxPoints, &out.MaxPoints + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownsamplingObservation. +func (in *DownsamplingObservation) DeepCopy() *DownsamplingObservation { + if in == nil { + return nil + } + out := new(DownsamplingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownsamplingParameters) DeepCopyInto(out *DownsamplingParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.GapFilling != nil { + in, out := &in.GapFilling, &out.GapFilling + *out = new(string) + **out = **in + } + if in.GridAggregation != nil { + in, out := &in.GridAggregation, &out.GridAggregation + *out = new(string) + **out = **in + } + if in.GridInterval != nil { + in, out := &in.GridInterval, &out.GridInterval + *out = new(float64) + **out = **in + } + if in.MaxPoints != nil { + in, out := &in.MaxPoints, &out.MaxPoints + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownsamplingParameters. +func (in *DownsamplingParameters) DeepCopy() *DownsamplingParameters { + if in == nil { + return nil + } + out := new(DownsamplingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GradientInitParameters) DeepCopyInto(out *GradientInitParameters) { + *out = *in + if in.GreenValue != nil { + in, out := &in.GreenValue, &out.GreenValue + *out = new(string) + **out = **in + } + if in.RedValue != nil { + in, out := &in.RedValue, &out.RedValue + *out = new(string) + **out = **in + } + if in.VioletValue != nil { + in, out := &in.VioletValue, &out.VioletValue + *out = new(string) + **out = **in + } + if in.YellowValue != nil { + in, out := &in.YellowValue, &out.YellowValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GradientInitParameters. +func (in *GradientInitParameters) DeepCopy() *GradientInitParameters { + if in == nil { + return nil + } + out := new(GradientInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GradientObservation) DeepCopyInto(out *GradientObservation) { + *out = *in + if in.GreenValue != nil { + in, out := &in.GreenValue, &out.GreenValue + *out = new(string) + **out = **in + } + if in.RedValue != nil { + in, out := &in.RedValue, &out.RedValue + *out = new(string) + **out = **in + } + if in.VioletValue != nil { + in, out := &in.VioletValue, &out.VioletValue + *out = new(string) + **out = **in + } + if in.YellowValue != nil { + in, out := &in.YellowValue, &out.YellowValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GradientObservation. +func (in *GradientObservation) DeepCopy() *GradientObservation { + if in == nil { + return nil + } + out := new(GradientObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GradientParameters) DeepCopyInto(out *GradientParameters) { + *out = *in + if in.GreenValue != nil { + in, out := &in.GreenValue, &out.GreenValue + *out = new(string) + **out = **in + } + if in.RedValue != nil { + in, out := &in.RedValue, &out.RedValue + *out = new(string) + **out = **in + } + if in.VioletValue != nil { + in, out := &in.VioletValue, &out.VioletValue + *out = new(string) + **out = **in + } + if in.YellowValue != nil { + in, out := &in.YellowValue, &out.YellowValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GradientParameters. +func (in *GradientParameters) DeepCopy() *GradientParameters { + if in == nil { + return nil + } + out := new(GradientParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeatmapSettingsInitParameters) DeepCopyInto(out *HeatmapSettingsInitParameters) { + *out = *in + if in.GreenValue != nil { + in, out := &in.GreenValue, &out.GreenValue + *out = new(string) + **out = **in + } + if in.RedValue != nil { + in, out := &in.RedValue, &out.RedValue + *out = new(string) + **out = **in + } + if in.VioletValue != nil { + in, out := &in.VioletValue, &out.VioletValue + *out = new(string) + **out = **in + } + if in.YellowValue != nil { + in, out := &in.YellowValue, &out.YellowValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeatmapSettingsInitParameters. 
+func (in *HeatmapSettingsInitParameters) DeepCopy() *HeatmapSettingsInitParameters { + if in == nil { + return nil + } + out := new(HeatmapSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeatmapSettingsObservation) DeepCopyInto(out *HeatmapSettingsObservation) { + *out = *in + if in.GreenValue != nil { + in, out := &in.GreenValue, &out.GreenValue + *out = new(string) + **out = **in + } + if in.RedValue != nil { + in, out := &in.RedValue, &out.RedValue + *out = new(string) + **out = **in + } + if in.VioletValue != nil { + in, out := &in.VioletValue, &out.VioletValue + *out = new(string) + **out = **in + } + if in.YellowValue != nil { + in, out := &in.YellowValue, &out.YellowValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeatmapSettingsObservation. +func (in *HeatmapSettingsObservation) DeepCopy() *HeatmapSettingsObservation { + if in == nil { + return nil + } + out := new(HeatmapSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeatmapSettingsParameters) DeepCopyInto(out *HeatmapSettingsParameters) { + *out = *in + if in.GreenValue != nil { + in, out := &in.GreenValue, &out.GreenValue + *out = new(string) + **out = **in + } + if in.RedValue != nil { + in, out := &in.RedValue, &out.RedValue + *out = new(string) + **out = **in + } + if in.VioletValue != nil { + in, out := &in.VioletValue, &out.VioletValue + *out = new(string) + **out = **in + } + if in.YellowValue != nil { + in, out := &in.YellowValue, &out.YellowValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeatmapSettingsParameters. +func (in *HeatmapSettingsParameters) DeepCopy() *HeatmapSettingsParameters { + if in == nil { + return nil + } + out := new(HeatmapSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelValuesInitParameters) DeepCopyInto(out *LabelValuesInitParameters) { + *out = *in + if in.DefaultValues != nil { + in, out := &in.DefaultValues, &out.DefaultValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabelKey != nil { + in, out := &in.LabelKey, &out.LabelKey + *out = new(string) + **out = **in + } + if in.Multiselectable != nil { + in, out := &in.Multiselectable, &out.Multiselectable + *out = new(bool) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValuesInitParameters. 
+func (in *LabelValuesInitParameters) DeepCopy() *LabelValuesInitParameters { + if in == nil { + return nil + } + out := new(LabelValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelValuesObservation) DeepCopyInto(out *LabelValuesObservation) { + *out = *in + if in.DefaultValues != nil { + in, out := &in.DefaultValues, &out.DefaultValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LabelKey != nil { + in, out := &in.LabelKey, &out.LabelKey + *out = new(string) + **out = **in + } + if in.Multiselectable != nil { + in, out := &in.Multiselectable, &out.Multiselectable + *out = new(bool) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValuesObservation. +func (in *LabelValuesObservation) DeepCopy() *LabelValuesObservation { + if in == nil { + return nil + } + out := new(LabelValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelValuesParameters) DeepCopyInto(out *LabelValuesParameters) { + *out = *in + if in.DefaultValues != nil { + in, out := &in.DefaultValues, &out.DefaultValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabelKey != nil { + in, out := &in.LabelKey, &out.LabelKey + *out = new(string) + **out = **in + } + if in.Multiselectable != nil { + in, out := &in.Multiselectable, &out.Multiselectable + *out = new(bool) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValuesParameters. +func (in *LabelValuesParameters) DeepCopy() *LabelValuesParameters { + if in == nil { + return nil + } + out := new(LabelValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LeftInitParameters) DeepCopyInto(out *LeftInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(string) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(string) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UnitFormat != nil { + in, out := &in.UnitFormat, &out.UnitFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeftInitParameters. +func (in *LeftInitParameters) DeepCopy() *LeftInitParameters { + if in == nil { + return nil + } + out := new(LeftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeftObservation) DeepCopyInto(out *LeftObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(string) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(string) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UnitFormat != nil { + in, out := &in.UnitFormat, &out.UnitFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeftObservation. +func (in *LeftObservation) DeepCopy() *LeftObservation { + if in == nil { + return nil + } + out := new(LeftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeftParameters) DeepCopyInto(out *LeftParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(string) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(string) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UnitFormat != nil { + in, out := &in.UnitFormat, &out.UnitFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeftParameters. +func (in *LeftParameters) DeepCopy() *LeftParameters { + if in == nil { + return nil + } + out := new(LeftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NameHidingSettingsInitParameters) DeepCopyInto(out *NameHidingSettingsInitParameters) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Positive != nil { + in, out := &in.Positive, &out.Positive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameHidingSettingsInitParameters. +func (in *NameHidingSettingsInitParameters) DeepCopy() *NameHidingSettingsInitParameters { + if in == nil { + return nil + } + out := new(NameHidingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameHidingSettingsObservation) DeepCopyInto(out *NameHidingSettingsObservation) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Positive != nil { + in, out := &in.Positive, &out.Positive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameHidingSettingsObservation. +func (in *NameHidingSettingsObservation) DeepCopy() *NameHidingSettingsObservation { + if in == nil { + return nil + } + out := new(NameHidingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameHidingSettingsParameters) DeepCopyInto(out *NameHidingSettingsParameters) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Positive != nil { + in, out := &in.Positive, &out.Positive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameHidingSettingsParameters. +func (in *NameHidingSettingsParameters) DeepCopy() *NameHidingSettingsParameters { + if in == nil { + return nil + } + out := new(NameHidingSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersInitParameters) DeepCopyInto(out *ParametersInitParameters) { + *out = *in + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = make([]CustomInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LabelValues != nil { + in, out := &in.LabelValues, &out.LabelValues + *out = make([]LabelValuesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]TextInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInitParameters. +func (in *ParametersInitParameters) DeepCopy() *ParametersInitParameters { + if in == nil { + return nil + } + out := new(ParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersObservation) DeepCopyInto(out *ParametersObservation) { + *out = *in + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = make([]CustomObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LabelValues != nil { + in, out := &in.LabelValues, &out.LabelValues + *out = make([]LabelValuesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]TextObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersObservation. +func (in *ParametersObservation) DeepCopy() *ParametersObservation { + if in == nil { + return nil + } + out := new(ParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersParameters) DeepCopyInto(out *ParametersParameters) { + *out = *in + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = make([]CustomParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LabelValues != nil { + in, out := &in.LabelValues, &out.LabelValues + *out = make([]LabelValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]TextParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersParameters. +func (in *ParametersParameters) DeepCopy() *ParametersParameters { + if in == nil { + return nil + } + out := new(ParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametrizationInitParameters) DeepCopyInto(out *ParametrizationInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametrizationInitParameters. +func (in *ParametrizationInitParameters) DeepCopy() *ParametrizationInitParameters { + if in == nil { + return nil + } + out := new(ParametrizationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametrizationObservation) DeepCopyInto(out *ParametrizationObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametrizationObservation. +func (in *ParametrizationObservation) DeepCopy() *ParametrizationObservation { + if in == nil { + return nil + } + out := new(ParametrizationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametrizationParameters) DeepCopyInto(out *ParametrizationParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametrizationParameters. +func (in *ParametrizationParameters) DeepCopy() *ParametrizationParameters { + if in == nil { + return nil + } + out := new(ParametrizationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PositionInitParameters) DeepCopyInto(out *PositionInitParameters) { + *out = *in + if in.H != nil { + in, out := &in.H, &out.H + *out = new(float64) + **out = **in + } + if in.W != nil { + in, out := &in.W, &out.W + *out = new(float64) + **out = **in + } + if in.X != nil { + in, out := &in.X, &out.X + *out = new(float64) + **out = **in + } + if in.Y != nil { + in, out := &in.Y, &out.Y + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PositionInitParameters. +func (in *PositionInitParameters) DeepCopy() *PositionInitParameters { + if in == nil { + return nil + } + out := new(PositionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PositionObservation) DeepCopyInto(out *PositionObservation) { + *out = *in + if in.H != nil { + in, out := &in.H, &out.H + *out = new(float64) + **out = **in + } + if in.W != nil { + in, out := &in.W, &out.W + *out = new(float64) + **out = **in + } + if in.X != nil { + in, out := &in.X, &out.X + *out = new(float64) + **out = **in + } + if in.Y != nil { + in, out := &in.Y, &out.Y + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PositionObservation. +func (in *PositionObservation) DeepCopy() *PositionObservation { + if in == nil { + return nil + } + out := new(PositionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PositionParameters) DeepCopyInto(out *PositionParameters) { + *out = *in + if in.H != nil { + in, out := &in.H, &out.H + *out = new(float64) + **out = **in + } + if in.W != nil { + in, out := &in.W, &out.W + *out = new(float64) + **out = **in + } + if in.X != nil { + in, out := &in.X, &out.X + *out = new(float64) + **out = **in + } + if in.Y != nil { + in, out := &in.Y, &out.Y + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PositionParameters. +func (in *PositionParameters) DeepCopy() *PositionParameters { + if in == nil { + return nil + } + out := new(PositionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueriesInitParameters) DeepCopyInto(out *QueriesInitParameters) { + *out = *in + if in.Downsampling != nil { + in, out := &in.Downsampling, &out.Downsampling + *out = make([]DownsamplingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]TargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesInitParameters. +func (in *QueriesInitParameters) DeepCopy() *QueriesInitParameters { + if in == nil { + return nil + } + out := new(QueriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesObservation) DeepCopyInto(out *QueriesObservation) { + *out = *in + if in.Downsampling != nil { + in, out := &in.Downsampling, &out.Downsampling + *out = make([]DownsamplingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]TargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesObservation. +func (in *QueriesObservation) DeepCopy() *QueriesObservation { + if in == nil { + return nil + } + out := new(QueriesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesParameters) DeepCopyInto(out *QueriesParameters) { + *out = *in + if in.Downsampling != nil { + in, out := &in.Downsampling, &out.Downsampling + *out = make([]DownsamplingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]TargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesParameters. +func (in *QueriesParameters) DeepCopy() *QueriesParameters { + if in == nil { + return nil + } + out := new(QueriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RightInitParameters) DeepCopyInto(out *RightInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(string) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(string) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UnitFormat != nil { + in, out := &in.UnitFormat, &out.UnitFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RightInitParameters. 
+func (in *RightInitParameters) DeepCopy() *RightInitParameters { + if in == nil { + return nil + } + out := new(RightInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RightObservation) DeepCopyInto(out *RightObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(string) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(string) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UnitFormat != nil { + in, out := &in.UnitFormat, &out.UnitFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RightObservation. +func (in *RightObservation) DeepCopy() *RightObservation { + if in == nil { + return nil + } + out := new(RightObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RightParameters) DeepCopyInto(out *RightParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(string) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(string) + **out = **in + } + if in.Precision != nil { + in, out := &in.Precision, &out.Precision + *out = new(float64) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UnitFormat != nil { + in, out := &in.UnitFormat, &out.UnitFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RightParameters. +func (in *RightParameters) DeepCopy() *RightParameters { + if in == nil { + return nil + } + out := new(RightParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeriesOverridesInitParameters) DeepCopyInto(out *SeriesOverridesInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetIndex != nil { + in, out := &in.TargetIndex, &out.TargetIndex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeriesOverridesInitParameters. +func (in *SeriesOverridesInitParameters) DeepCopy() *SeriesOverridesInitParameters { + if in == nil { + return nil + } + out := new(SeriesOverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeriesOverridesObservation) DeepCopyInto(out *SeriesOverridesObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetIndex != nil { + in, out := &in.TargetIndex, &out.TargetIndex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeriesOverridesObservation. +func (in *SeriesOverridesObservation) DeepCopy() *SeriesOverridesObservation { + if in == nil { + return nil + } + out := new(SeriesOverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeriesOverridesParameters) DeepCopyInto(out *SeriesOverridesParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetIndex != nil { + in, out := &in.TargetIndex, &out.TargetIndex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeriesOverridesParameters. +func (in *SeriesOverridesParameters) DeepCopy() *SeriesOverridesParameters { + if in == nil { + return nil + } + out := new(SeriesOverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.GrowDown != nil { + in, out := &in.GrowDown, &out.GrowDown + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackName != nil { + in, out := &in.StackName, &out.StackName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.YaxisPosition != nil { + in, out := &in.YaxisPosition, &out.YaxisPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. +func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.GrowDown != nil { + in, out := &in.GrowDown, &out.GrowDown + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackName != nil { + in, out := &in.StackName, &out.StackName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.YaxisPosition != nil { + in, out := &in.YaxisPosition, &out.YaxisPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.GrowDown != nil { + in, out := &in.GrowDown, &out.GrowDown + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackName != nil { + in, out := &in.StackName, &out.StackName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.YaxisPosition != nil { + in, out := &in.YaxisPosition, &out.YaxisPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. +func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandardInitParameters) DeepCopyInto(out *StandardInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardInitParameters. +func (in *StandardInitParameters) DeepCopy() *StandardInitParameters { + if in == nil { + return nil + } + out := new(StandardInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandardObservation) DeepCopyInto(out *StandardObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardObservation. +func (in *StandardObservation) DeepCopy() *StandardObservation { + if in == nil { + return nil + } + out := new(StandardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandardParameters) DeepCopyInto(out *StandardParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardParameters. 
+func (in *StandardParameters) DeepCopy() *StandardParameters { + if in == nil { + return nil + } + out := new(StandardParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.TextMode != nil { + in, out := &in.TextMode, &out.TextMode + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. +func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.TextMode != nil { + in, out := &in.TextMode, &out.TextMode + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.Hidden != nil { + in, out := &in.Hidden, &out.Hidden + *out = new(bool) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.TextMode != nil { + in, out := &in.TextMode, &out.TextMode + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextInitParameters) DeepCopyInto(out *TextInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextInitParameters. +func (in *TextInitParameters) DeepCopy() *TextInitParameters { + if in == nil { + return nil + } + out := new(TextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TextObservation) DeepCopyInto(out *TextObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextObservation. +func (in *TextObservation) DeepCopy() *TextObservation { + if in == nil { + return nil + } + out := new(TextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextParameters) DeepCopyInto(out *TextParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextParameters. +func (in *TextParameters) DeepCopy() *TextParameters { + if in == nil { + return nil + } + out := new(TextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TitleInitParameters) DeepCopyInto(out *TitleInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TitleInitParameters. +func (in *TitleInitParameters) DeepCopy() *TitleInitParameters { + if in == nil { + return nil + } + out := new(TitleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TitleObservation) DeepCopyInto(out *TitleObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TitleObservation. +func (in *TitleObservation) DeepCopy() *TitleObservation { + if in == nil { + return nil + } + out := new(TitleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TitleParameters) DeepCopyInto(out *TitleParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TitleParameters. +func (in *TitleParameters) DeepCopy() *TitleParameters { + if in == nil { + return nil + } + out := new(TitleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VisualizationSettingsInitParameters) DeepCopyInto(out *VisualizationSettingsInitParameters) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.ColorSchemeSettings != nil { + in, out := &in.ColorSchemeSettings, &out.ColorSchemeSettings + *out = make([]ColorSchemeSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HeatmapSettings != nil { + in, out := &in.HeatmapSettings, &out.HeatmapSettings + *out = make([]HeatmapSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Interpolate != nil { + in, out := &in.Interpolate, &out.Interpolate + *out = new(string) + **out = **in + } + if in.Normalize != nil { + in, out := &in.Normalize, &out.Normalize + *out = new(bool) + **out = **in + } + if in.ShowLabels != nil { + in, out := &in.ShowLabels, &out.ShowLabels + *out = new(bool) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.YaxisSettings != nil { + in, out := &in.YaxisSettings, &out.YaxisSettings + *out = make([]YaxisSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationSettingsInitParameters. +func (in *VisualizationSettingsInitParameters) DeepCopy() *VisualizationSettingsInitParameters { + if in == nil { + return nil + } + out := new(VisualizationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VisualizationSettingsObservation) DeepCopyInto(out *VisualizationSettingsObservation) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.ColorSchemeSettings != nil { + in, out := &in.ColorSchemeSettings, &out.ColorSchemeSettings + *out = make([]ColorSchemeSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HeatmapSettings != nil { + in, out := &in.HeatmapSettings, &out.HeatmapSettings + *out = make([]HeatmapSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Interpolate != nil { + in, out := &in.Interpolate, &out.Interpolate + *out = new(string) + **out = **in + } + if in.Normalize != nil { + in, out := &in.Normalize, &out.Normalize + *out = new(bool) + **out = **in + } + if in.ShowLabels != nil { + in, out := &in.ShowLabels, &out.ShowLabels + *out = new(bool) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.YaxisSettings != nil { + in, out := &in.YaxisSettings, &out.YaxisSettings + *out = make([]YaxisSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationSettingsObservation. 
+func (in *VisualizationSettingsObservation) DeepCopy() *VisualizationSettingsObservation { + if in == nil { + return nil + } + out := new(VisualizationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VisualizationSettingsParameters) DeepCopyInto(out *VisualizationSettingsParameters) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.ColorSchemeSettings != nil { + in, out := &in.ColorSchemeSettings, &out.ColorSchemeSettings + *out = make([]ColorSchemeSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HeatmapSettings != nil { + in, out := &in.HeatmapSettings, &out.HeatmapSettings + *out = make([]HeatmapSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Interpolate != nil { + in, out := &in.Interpolate, &out.Interpolate + *out = new(string) + **out = **in + } + if in.Normalize != nil { + in, out := &in.Normalize, &out.Normalize + *out = new(bool) + **out = **in + } + if in.ShowLabels != nil { + in, out := &in.ShowLabels, &out.ShowLabels + *out = new(bool) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.YaxisSettings != nil { + in, out := &in.YaxisSettings, &out.YaxisSettings + *out = make([]YaxisSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationSettingsParameters. +func (in *VisualizationSettingsParameters) DeepCopy() *VisualizationSettingsParameters { + if in == nil { + return nil + } + out := new(VisualizationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WidgetsInitParameters) DeepCopyInto(out *WidgetsInitParameters) { + *out = *in + if in.Chart != nil { + in, out := &in.Chart, &out.Chart + *out = make([]ChartInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = make([]PositionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]WidgetsTextInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = make([]TitleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WidgetsInitParameters. +func (in *WidgetsInitParameters) DeepCopy() *WidgetsInitParameters { + if in == nil { + return nil + } + out := new(WidgetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WidgetsObservation) DeepCopyInto(out *WidgetsObservation) { + *out = *in + if in.Chart != nil { + in, out := &in.Chart, &out.Chart + *out = make([]ChartObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = make([]PositionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]WidgetsTextObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = make([]TitleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WidgetsObservation. +func (in *WidgetsObservation) DeepCopy() *WidgetsObservation { + if in == nil { + return nil + } + out := new(WidgetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WidgetsParameters) DeepCopyInto(out *WidgetsParameters) { + *out = *in + if in.Chart != nil { + in, out := &in.Chart, &out.Chart + *out = make([]ChartParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = make([]PositionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]WidgetsTextParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = make([]TitleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WidgetsParameters. +func (in *WidgetsParameters) DeepCopy() *WidgetsParameters { + if in == nil { + return nil + } + out := new(WidgetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WidgetsTextInitParameters) DeepCopyInto(out *WidgetsTextInitParameters) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WidgetsTextInitParameters. +func (in *WidgetsTextInitParameters) DeepCopy() *WidgetsTextInitParameters { + if in == nil { + return nil + } + out := new(WidgetsTextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WidgetsTextObservation) DeepCopyInto(out *WidgetsTextObservation) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WidgetsTextObservation. +func (in *WidgetsTextObservation) DeepCopy() *WidgetsTextObservation { + if in == nil { + return nil + } + out := new(WidgetsTextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WidgetsTextParameters) DeepCopyInto(out *WidgetsTextParameters) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WidgetsTextParameters. +func (in *WidgetsTextParameters) DeepCopy() *WidgetsTextParameters { + if in == nil { + return nil + } + out := new(WidgetsTextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YaxisSettingsInitParameters) DeepCopyInto(out *YaxisSettingsInitParameters) { + *out = *in + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = make([]LeftInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Right != nil { + in, out := &in.Right, &out.Right + *out = make([]RightInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YaxisSettingsInitParameters. +func (in *YaxisSettingsInitParameters) DeepCopy() *YaxisSettingsInitParameters { + if in == nil { + return nil + } + out := new(YaxisSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YaxisSettingsObservation) DeepCopyInto(out *YaxisSettingsObservation) { + *out = *in + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = make([]LeftObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Right != nil { + in, out := &in.Right, &out.Right + *out = make([]RightObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YaxisSettingsObservation. +func (in *YaxisSettingsObservation) DeepCopy() *YaxisSettingsObservation { + if in == nil { + return nil + } + out := new(YaxisSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YaxisSettingsParameters) DeepCopyInto(out *YaxisSettingsParameters) { + *out = *in + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = make([]LeftParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Right != nil { + in, out := &in.Right, &out.Right + *out = make([]RightParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YaxisSettingsParameters. +func (in *YaxisSettingsParameters) DeepCopy() *YaxisSettingsParameters { + if in == nil { + return nil + } + out := new(YaxisSettingsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/monitoring/v1alpha1/zz_generated.resolvers.go b/apis/monitoring/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..92cea05 --- /dev/null +++ b/apis/monitoring/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,98 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Dashboard. +func (mg *Dashboard) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Parametrization); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Parametrization[i3].Parameters); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderIDRef, + Selector: mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderID") + } + mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderIDRef = rsp.ResolvedReference + + } + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Parametrization); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Parametrization[i3].Parameters); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderIDRef, + Selector: mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + 
if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderID") + } + mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Parametrization[i3].Parameters[i4].LabelValues[i5].FolderIDRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/monitoring/v1alpha1/zz_groupversion_info.go b/apis/monitoring/v1alpha1/zz_groupversion_info.go index 4c900c9..517997f 100755 --- a/apis/monitoring/v1alpha1/zz_groupversion_info.go +++ b/apis/monitoring/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/organizationmanager/v1alpha1/zz_generated.conversion_hubs.go b/apis/organizationmanager/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..843aca5 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,30 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Group) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GroupIAMMember) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GroupMembership) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OrganizationIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OrganizationIAMMember) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OsLoginSettings) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SAMLFederation) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SAMLFederationUserAccount) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *UserSSHKey) Hub() {} diff --git a/apis/organizationmanager/v1alpha1/zz_generated.deepcopy.go b/apis/organizationmanager/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..0d4ea33 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2218 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. +func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupIAMMember) DeepCopyInto(out *GroupIAMMember) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMember. +func (in *GroupIAMMember) DeepCopy() *GroupIAMMember { + if in == nil { + return nil + } + out := new(GroupIAMMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupIAMMember) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupIAMMemberInitParameters) DeepCopyInto(out *GroupIAMMemberInitParameters) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.GroupIDRef != nil { + in, out := &in.GroupIDRef, &out.GroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GroupIDSelector != nil { + in, out := &in.GroupIDSelector, &out.GroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMemberInitParameters. +func (in *GroupIAMMemberInitParameters) DeepCopy() *GroupIAMMemberInitParameters { + if in == nil { + return nil + } + out := new(GroupIAMMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupIAMMemberList) DeepCopyInto(out *GroupIAMMemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GroupIAMMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMemberList. +func (in *GroupIAMMemberList) DeepCopy() *GroupIAMMemberList { + if in == nil { + return nil + } + out := new(GroupIAMMemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupIAMMemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupIAMMemberObservation) DeepCopyInto(out *GroupIAMMemberObservation) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMemberObservation. +func (in *GroupIAMMemberObservation) DeepCopy() *GroupIAMMemberObservation { + if in == nil { + return nil + } + out := new(GroupIAMMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupIAMMemberParameters) DeepCopyInto(out *GroupIAMMemberParameters) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.GroupIDRef != nil { + in, out := &in.GroupIDRef, &out.GroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GroupIDSelector != nil { + in, out := &in.GroupIDSelector, &out.GroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMemberParameters. +func (in *GroupIAMMemberParameters) DeepCopy() *GroupIAMMemberParameters { + if in == nil { + return nil + } + out := new(GroupIAMMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupIAMMemberSpec) DeepCopyInto(out *GroupIAMMemberSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMemberSpec. +func (in *GroupIAMMemberSpec) DeepCopy() *GroupIAMMemberSpec { + if in == nil { + return nil + } + out := new(GroupIAMMemberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupIAMMemberStatus) DeepCopyInto(out *GroupIAMMemberStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupIAMMemberStatus. +func (in *GroupIAMMemberStatus) DeepCopy() *GroupIAMMemberStatus { + if in == nil { + return nil + } + out := new(GroupIAMMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupInitParameters) DeepCopyInto(out *GroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupInitParameters. +func (in *GroupInitParameters) DeepCopy() *GroupInitParameters { + if in == nil { + return nil + } + out := new(GroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupMembership) DeepCopyInto(out *GroupMembership) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembership. +func (in *GroupMembership) DeepCopy() *GroupMembership { + if in == nil { + return nil + } + out := new(GroupMembership) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupMembership) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupMembershipInitParameters) DeepCopyInto(out *GroupMembershipInitParameters) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.GroupIDRef != nil { + in, out := &in.GroupIDRef, &out.GroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GroupIDSelector != nil { + in, out := &in.GroupIDSelector, &out.GroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembershipInitParameters. 
+func (in *GroupMembershipInitParameters) DeepCopy() *GroupMembershipInitParameters { + if in == nil { + return nil + } + out := new(GroupMembershipInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupMembershipList) DeepCopyInto(out *GroupMembershipList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GroupMembership, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembershipList. +func (in *GroupMembershipList) DeepCopy() *GroupMembershipList { + if in == nil { + return nil + } + out := new(GroupMembershipList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupMembershipList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupMembershipObservation) DeepCopyInto(out *GroupMembershipObservation) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembershipObservation. +func (in *GroupMembershipObservation) DeepCopy() *GroupMembershipObservation { + if in == nil { + return nil + } + out := new(GroupMembershipObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupMembershipParameters) DeepCopyInto(out *GroupMembershipParameters) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.GroupIDRef != nil { + in, out := &in.GroupIDRef, &out.GroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GroupIDSelector != nil { + in, out := &in.GroupIDSelector, &out.GroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembershipParameters. +func (in *GroupMembershipParameters) DeepCopy() *GroupMembershipParameters { + if in == nil { + return nil + } + out := new(GroupMembershipParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupMembershipSpec) DeepCopyInto(out *GroupMembershipSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembershipSpec. +func (in *GroupMembershipSpec) DeepCopy() *GroupMembershipSpec { + if in == nil { + return nil + } + out := new(GroupMembershipSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupMembershipStatus) DeepCopyInto(out *GroupMembershipStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupMembershipStatus. +func (in *GroupMembershipStatus) DeepCopy() *GroupMembershipStatus { + if in == nil { + return nil + } + out := new(GroupMembershipStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupObservation) DeepCopyInto(out *GroupObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupObservation. +func (in *GroupObservation) DeepCopy() *GroupObservation { + if in == nil { + return nil + } + out := new(GroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupParameters) DeepCopyInto(out *GroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupParameters. +func (in *GroupParameters) DeepCopy() *GroupParameters { + if in == nil { + return nil + } + out := new(GroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSpec) DeepCopyInto(out *GroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSpec. 
+func (in *GroupSpec) DeepCopy() *GroupSpec { + if in == nil { + return nil + } + out := new(GroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupStatus) DeepCopyInto(out *GroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus. +func (in *GroupStatus) DeepCopy() *GroupStatus { + if in == nil { + return nil + } + out := new(GroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBinding) DeepCopyInto(out *OrganizationIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBinding. +func (in *OrganizationIAMBinding) DeepCopy() *OrganizationIAMBinding { + if in == nil { + return nil + } + out := new(OrganizationIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrganizationIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBindingInitParameters) DeepCopyInto(out *OrganizationIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBindingInitParameters. +func (in *OrganizationIAMBindingInitParameters) DeepCopy() *OrganizationIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(OrganizationIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBindingList) DeepCopyInto(out *OrganizationIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OrganizationIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBindingList. 
+func (in *OrganizationIAMBindingList) DeepCopy() *OrganizationIAMBindingList { + if in == nil { + return nil + } + out := new(OrganizationIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrganizationIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBindingObservation) DeepCopyInto(out *OrganizationIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBindingObservation. +func (in *OrganizationIAMBindingObservation) DeepCopy() *OrganizationIAMBindingObservation { + if in == nil { + return nil + } + out := new(OrganizationIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBindingParameters) DeepCopyInto(out *OrganizationIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBindingParameters. +func (in *OrganizationIAMBindingParameters) DeepCopy() *OrganizationIAMBindingParameters { + if in == nil { + return nil + } + out := new(OrganizationIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBindingSpec) DeepCopyInto(out *OrganizationIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBindingSpec. 
+func (in *OrganizationIAMBindingSpec) DeepCopy() *OrganizationIAMBindingSpec { + if in == nil { + return nil + } + out := new(OrganizationIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMBindingStatus) DeepCopyInto(out *OrganizationIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMBindingStatus. +func (in *OrganizationIAMBindingStatus) DeepCopy() *OrganizationIAMBindingStatus { + if in == nil { + return nil + } + out := new(OrganizationIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMMember) DeepCopyInto(out *OrganizationIAMMember) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMember. +func (in *OrganizationIAMMember) DeepCopy() *OrganizationIAMMember { + if in == nil { + return nil + } + out := new(OrganizationIAMMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrganizationIAMMember) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMMemberInitParameters) DeepCopyInto(out *OrganizationIAMMemberInitParameters) { + *out = *in + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMemberInitParameters. +func (in *OrganizationIAMMemberInitParameters) DeepCopy() *OrganizationIAMMemberInitParameters { + if in == nil { + return nil + } + out := new(OrganizationIAMMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMMemberList) DeepCopyInto(out *OrganizationIAMMemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OrganizationIAMMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMemberList. 
+func (in *OrganizationIAMMemberList) DeepCopy() *OrganizationIAMMemberList { + if in == nil { + return nil + } + out := new(OrganizationIAMMemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrganizationIAMMemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMMemberObservation) DeepCopyInto(out *OrganizationIAMMemberObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMemberObservation. +func (in *OrganizationIAMMemberObservation) DeepCopy() *OrganizationIAMMemberObservation { + if in == nil { + return nil + } + out := new(OrganizationIAMMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMMemberParameters) DeepCopyInto(out *OrganizationIAMMemberParameters) { + *out = *in + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMemberParameters. +func (in *OrganizationIAMMemberParameters) DeepCopy() *OrganizationIAMMemberParameters { + if in == nil { + return nil + } + out := new(OrganizationIAMMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationIAMMemberSpec) DeepCopyInto(out *OrganizationIAMMemberSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMemberSpec. +func (in *OrganizationIAMMemberSpec) DeepCopy() *OrganizationIAMMemberSpec { + if in == nil { + return nil + } + out := new(OrganizationIAMMemberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrganizationIAMMemberStatus) DeepCopyInto(out *OrganizationIAMMemberStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationIAMMemberStatus. +func (in *OrganizationIAMMemberStatus) DeepCopy() *OrganizationIAMMemberStatus { + if in == nil { + return nil + } + out := new(OrganizationIAMMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettings) DeepCopyInto(out *OsLoginSettings) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettings. +func (in *OsLoginSettings) DeepCopy() *OsLoginSettings { + if in == nil { + return nil + } + out := new(OsLoginSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OsLoginSettings) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettingsInitParameters) DeepCopyInto(out *OsLoginSettingsInitParameters) { + *out = *in + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.SSHCertificateSettings != nil { + in, out := &in.SSHCertificateSettings, &out.SSHCertificateSettings + *out = make([]SSHCertificateSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserSSHKeySettings != nil { + in, out := &in.UserSSHKeySettings, &out.UserSSHKeySettings + *out = make([]UserSSHKeySettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettingsInitParameters. +func (in *OsLoginSettingsInitParameters) DeepCopy() *OsLoginSettingsInitParameters { + if in == nil { + return nil + } + out := new(OsLoginSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettingsList) DeepCopyInto(out *OsLoginSettingsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OsLoginSettings, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettingsList. +func (in *OsLoginSettingsList) DeepCopy() *OsLoginSettingsList { + if in == nil { + return nil + } + out := new(OsLoginSettingsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OsLoginSettingsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettingsObservation) DeepCopyInto(out *OsLoginSettingsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.SSHCertificateSettings != nil { + in, out := &in.SSHCertificateSettings, &out.SSHCertificateSettings + *out = make([]SSHCertificateSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserSSHKeySettings != nil { + in, out := &in.UserSSHKeySettings, &out.UserSSHKeySettings + *out = make([]UserSSHKeySettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettingsObservation. +func (in *OsLoginSettingsObservation) DeepCopy() *OsLoginSettingsObservation { + if in == nil { + return nil + } + out := new(OsLoginSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettingsParameters) DeepCopyInto(out *OsLoginSettingsParameters) { + *out = *in + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } + if in.SSHCertificateSettings != nil { + in, out := &in.SSHCertificateSettings, &out.SSHCertificateSettings + *out = make([]SSHCertificateSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserSSHKeySettings != nil { + in, out := &in.UserSSHKeySettings, &out.UserSSHKeySettings + *out = make([]UserSSHKeySettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettingsParameters. +func (in *OsLoginSettingsParameters) DeepCopy() *OsLoginSettingsParameters { + if in == nil { + return nil + } + out := new(OsLoginSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettingsSpec) DeepCopyInto(out *OsLoginSettingsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettingsSpec. +func (in *OsLoginSettingsSpec) DeepCopy() *OsLoginSettingsSpec { + if in == nil { + return nil + } + out := new(OsLoginSettingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsLoginSettingsStatus) DeepCopyInto(out *OsLoginSettingsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsLoginSettingsStatus. 
+func (in *OsLoginSettingsStatus) DeepCopy() *OsLoginSettingsStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(OsLoginSettingsStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederation) DeepCopyInto(out *SAMLFederation) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederation.
+func (in *SAMLFederation) DeepCopy() *SAMLFederation {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SAMLFederation) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationInitParameters) DeepCopyInto(out *SAMLFederationInitParameters) {
+	*out = *in
+	if in.AutoCreateAccountOnLogin != nil {
+		in, out := &in.AutoCreateAccountOnLogin, &out.AutoCreateAccountOnLogin
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CaseInsensitiveNameIds != nil {
+		in, out := &in.CaseInsensitiveNameIds, &out.CaseInsensitiveNameIds
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CookieMaxAge != nil {
+		in, out := &in.CookieMaxAge, &out.CookieMaxAge
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Issuer != nil {
+		in, out := &in.Issuer, &out.Issuer
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OrganizationID != nil {
+		in, out := &in.OrganizationID, &out.OrganizationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecuritySettings != nil {
+		in, out := &in.SecuritySettings, &out.SecuritySettings
+		*out = make([]SecuritySettingsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SsoBinding != nil {
+		in, out := &in.SsoBinding, &out.SsoBinding
+		*out = new(string)
+		**out = **in
+	}
+	if in.SsoURL != nil {
+		in, out := &in.SsoURL, &out.SsoURL
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationInitParameters.
+func (in *SAMLFederationInitParameters) DeepCopy() *SAMLFederationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationList) DeepCopyInto(out *SAMLFederationList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]SAMLFederation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationList.
+func (in *SAMLFederationList) DeepCopy() *SAMLFederationList {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SAMLFederationList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationObservation) DeepCopyInto(out *SAMLFederationObservation) {
+	*out = *in
+	if in.AutoCreateAccountOnLogin != nil {
+		in, out := &in.AutoCreateAccountOnLogin, &out.AutoCreateAccountOnLogin
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CaseInsensitiveNameIds != nil {
+		in, out := &in.CaseInsensitiveNameIds, &out.CaseInsensitiveNameIds
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CookieMaxAge != nil {
+		in, out := &in.CookieMaxAge, &out.CookieMaxAge
+		*out = new(string)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Issuer != nil {
+		in, out := &in.Issuer, &out.Issuer
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OrganizationID != nil {
+		in, out := &in.OrganizationID, &out.OrganizationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecuritySettings != nil {
+		in, out := &in.SecuritySettings, &out.SecuritySettings
+		*out = make([]SecuritySettingsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SsoBinding != nil {
+		in, out := &in.SsoBinding, &out.SsoBinding
+		*out = new(string)
+		**out = **in
+	}
+	if in.SsoURL != nil {
+		in, out := &in.SsoURL, &out.SsoURL
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationObservation.
+func (in *SAMLFederationObservation) DeepCopy() *SAMLFederationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationParameters) DeepCopyInto(out *SAMLFederationParameters) {
+	*out = *in
+	if in.AutoCreateAccountOnLogin != nil {
+		in, out := &in.AutoCreateAccountOnLogin, &out.AutoCreateAccountOnLogin
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CaseInsensitiveNameIds != nil {
+		in, out := &in.CaseInsensitiveNameIds, &out.CaseInsensitiveNameIds
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CookieMaxAge != nil {
+		in, out := &in.CookieMaxAge, &out.CookieMaxAge
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Issuer != nil {
+		in, out := &in.Issuer, &out.Issuer
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OrganizationID != nil {
+		in, out := &in.OrganizationID, &out.OrganizationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecuritySettings != nil {
+		in, out := &in.SecuritySettings, &out.SecuritySettings
+		*out = make([]SecuritySettingsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SsoBinding != nil {
+		in, out := &in.SsoBinding, &out.SsoBinding
+		*out = new(string)
+		**out = **in
+	}
+	if in.SsoURL != nil {
+		in, out := &in.SsoURL, &out.SsoURL
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationParameters.
+func (in *SAMLFederationParameters) DeepCopy() *SAMLFederationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationSpec) DeepCopyInto(out *SAMLFederationSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationSpec.
+func (in *SAMLFederationSpec) DeepCopy() *SAMLFederationSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationStatus) DeepCopyInto(out *SAMLFederationStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationStatus.
+func (in *SAMLFederationStatus) DeepCopy() *SAMLFederationStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccount) DeepCopyInto(out *SAMLFederationUserAccount) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccount.
+func (in *SAMLFederationUserAccount) DeepCopy() *SAMLFederationUserAccount {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SAMLFederationUserAccount) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccountInitParameters) DeepCopyInto(out *SAMLFederationUserAccountInitParameters) {
+	*out = *in
+	if in.FederationID != nil {
+		in, out := &in.FederationID, &out.FederationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameID != nil {
+		in, out := &in.NameID, &out.NameID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccountInitParameters.
+func (in *SAMLFederationUserAccountInitParameters) DeepCopy() *SAMLFederationUserAccountInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccountInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccountList) DeepCopyInto(out *SAMLFederationUserAccountList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]SAMLFederationUserAccount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccountList.
+func (in *SAMLFederationUserAccountList) DeepCopy() *SAMLFederationUserAccountList {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccountList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SAMLFederationUserAccountList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccountObservation) DeepCopyInto(out *SAMLFederationUserAccountObservation) {
+	*out = *in
+	if in.FederationID != nil {
+		in, out := &in.FederationID, &out.FederationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameID != nil {
+		in, out := &in.NameID, &out.NameID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccountObservation.
+func (in *SAMLFederationUserAccountObservation) DeepCopy() *SAMLFederationUserAccountObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccountObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccountParameters) DeepCopyInto(out *SAMLFederationUserAccountParameters) {
+	*out = *in
+	if in.FederationID != nil {
+		in, out := &in.FederationID, &out.FederationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NameID != nil {
+		in, out := &in.NameID, &out.NameID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccountParameters.
+func (in *SAMLFederationUserAccountParameters) DeepCopy() *SAMLFederationUserAccountParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccountParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccountSpec) DeepCopyInto(out *SAMLFederationUserAccountSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccountSpec.
+func (in *SAMLFederationUserAccountSpec) DeepCopy() *SAMLFederationUserAccountSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccountSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SAMLFederationUserAccountStatus) DeepCopyInto(out *SAMLFederationUserAccountStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLFederationUserAccountStatus.
+func (in *SAMLFederationUserAccountStatus) DeepCopy() *SAMLFederationUserAccountStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SAMLFederationUserAccountStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SSHCertificateSettingsInitParameters) DeepCopyInto(out *SSHCertificateSettingsInitParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHCertificateSettingsInitParameters.
+func (in *SSHCertificateSettingsInitParameters) DeepCopy() *SSHCertificateSettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SSHCertificateSettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SSHCertificateSettingsObservation) DeepCopyInto(out *SSHCertificateSettingsObservation) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHCertificateSettingsObservation.
+func (in *SSHCertificateSettingsObservation) DeepCopy() *SSHCertificateSettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SSHCertificateSettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SSHCertificateSettingsParameters) DeepCopyInto(out *SSHCertificateSettingsParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHCertificateSettingsParameters.
+func (in *SSHCertificateSettingsParameters) DeepCopy() *SSHCertificateSettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SSHCertificateSettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecuritySettingsInitParameters) DeepCopyInto(out *SecuritySettingsInitParameters) {
+	*out = *in
+	if in.EncryptedAssertions != nil {
+		in, out := &in.EncryptedAssertions, &out.EncryptedAssertions
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettingsInitParameters.
+func (in *SecuritySettingsInitParameters) DeepCopy() *SecuritySettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecuritySettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecuritySettingsObservation) DeepCopyInto(out *SecuritySettingsObservation) {
+	*out = *in
+	if in.EncryptedAssertions != nil {
+		in, out := &in.EncryptedAssertions, &out.EncryptedAssertions
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettingsObservation.
+func (in *SecuritySettingsObservation) DeepCopy() *SecuritySettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecuritySettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecuritySettingsParameters) DeepCopyInto(out *SecuritySettingsParameters) {
+	*out = *in
+	if in.EncryptedAssertions != nil {
+		in, out := &in.EncryptedAssertions, &out.EncryptedAssertions
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettingsParameters.
+func (in *SecuritySettingsParameters) DeepCopy() *SecuritySettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecuritySettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKey) DeepCopyInto(out *UserSSHKey) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKey.
+func (in *UserSSHKey) DeepCopy() *UserSSHKey {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKey)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UserSSHKey) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeyInitParameters) DeepCopyInto(out *UserSSHKeyInitParameters) {
+	*out = *in
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExpiresAt != nil {
+		in, out := &in.ExpiresAt, &out.ExpiresAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OrganizationID != nil {
+		in, out := &in.OrganizationID, &out.OrganizationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubjectID != nil {
+		in, out := &in.SubjectID, &out.SubjectID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeyInitParameters.
+func (in *UserSSHKeyInitParameters) DeepCopy() *UserSSHKeyInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeyInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeyList) DeepCopyInto(out *UserSSHKeyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]UserSSHKey, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeyList.
+func (in *UserSSHKeyList) DeepCopy() *UserSSHKeyList {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UserSSHKeyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeyObservation) DeepCopyInto(out *UserSSHKeyObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExpiresAt != nil {
+		in, out := &in.ExpiresAt, &out.ExpiresAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Fingerprint != nil {
+		in, out := &in.Fingerprint, &out.Fingerprint
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OrganizationID != nil {
+		in, out := &in.OrganizationID, &out.OrganizationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubjectID != nil {
+		in, out := &in.SubjectID, &out.SubjectID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeyObservation.
+func (in *UserSSHKeyObservation) DeepCopy() *UserSSHKeyObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeyObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeyParameters) DeepCopyInto(out *UserSSHKeyParameters) {
+	*out = *in
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExpiresAt != nil {
+		in, out := &in.ExpiresAt, &out.ExpiresAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OrganizationID != nil {
+		in, out := &in.OrganizationID, &out.OrganizationID
+		*out = new(string)
+		**out = **in
+	}
+	if in.SubjectID != nil {
+		in, out := &in.SubjectID, &out.SubjectID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeyParameters.
+func (in *UserSSHKeyParameters) DeepCopy() *UserSSHKeyParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeyParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeySettingsInitParameters) DeepCopyInto(out *UserSSHKeySettingsInitParameters) {
+	*out = *in
+	if in.AllowManageOwnKeys != nil {
+		in, out := &in.AllowManageOwnKeys, &out.AllowManageOwnKeys
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeySettingsInitParameters.
+func (in *UserSSHKeySettingsInitParameters) DeepCopy() *UserSSHKeySettingsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeySettingsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeySettingsObservation) DeepCopyInto(out *UserSSHKeySettingsObservation) {
+	*out = *in
+	if in.AllowManageOwnKeys != nil {
+		in, out := &in.AllowManageOwnKeys, &out.AllowManageOwnKeys
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeySettingsObservation.
+func (in *UserSSHKeySettingsObservation) DeepCopy() *UserSSHKeySettingsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeySettingsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeySettingsParameters) DeepCopyInto(out *UserSSHKeySettingsParameters) {
+	*out = *in
+	if in.AllowManageOwnKeys != nil {
+		in, out := &in.AllowManageOwnKeys, &out.AllowManageOwnKeys
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeySettingsParameters.
+func (in *UserSSHKeySettingsParameters) DeepCopy() *UserSSHKeySettingsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeySettingsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeySpec) DeepCopyInto(out *UserSSHKeySpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeySpec.
+func (in *UserSSHKeySpec) DeepCopy() *UserSSHKeySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserSSHKeyStatus) DeepCopyInto(out *UserSSHKeyStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSSHKeyStatus.
+func (in *UserSSHKeyStatus) DeepCopy() *UserSSHKeyStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(UserSSHKeyStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_generated.resolvers.go b/apis/organizationmanager/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..b8eff39
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,94 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	errors "github.com/pkg/errors"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this GroupIAMMember.
+func (mg *GroupIAMMember) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GroupID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.GroupIDRef,
+		Selector:     mg.Spec.ForProvider.GroupIDSelector,
+		To: reference.To{
+			List:    &GroupList{},
+			Managed: &Group{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.GroupID")
+	}
+	mg.Spec.ForProvider.GroupID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.GroupIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GroupID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.GroupIDRef,
+		Selector:     mg.Spec.InitProvider.GroupIDSelector,
+		To: reference.To{
+			List:    &GroupList{},
+			Managed: &Group{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.GroupID")
+	}
+	mg.Spec.InitProvider.GroupID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.GroupIDRef = rsp.ResolvedReference
+
+	return nil
+}
+
+// ResolveReferences of this GroupMembership.
+func (mg *GroupMembership) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GroupID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.GroupIDRef,
+		Selector:     mg.Spec.ForProvider.GroupIDSelector,
+		To: reference.To{
+			List:    &GroupList{},
+			Managed: &Group{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.GroupID")
+	}
+	mg.Spec.ForProvider.GroupID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.GroupIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GroupID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.GroupIDRef,
+		Selector:     mg.Spec.InitProvider.GroupIDSelector,
+		To: reference.To{
+			List:    &GroupList{},
+			Managed: &Group{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.GroupID")
+	}
+	mg.Spec.InitProvider.GroupID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.GroupIDRef = rsp.ResolvedReference
+
+	return nil
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_group_terraformed.go b/apis/organizationmanager/v1alpha1/zz_group_terraformed.go
new file mode 100755
index 0000000..02dfcc5
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_group_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Group
+func (mg *Group) GetTerraformResourceType() string {
+	return "yandex_organizationmanager_group"
+}
+
+// GetConnectionDetailsMapping for this Group
+func (tr *Group) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Group
+func (tr *Group) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Group
+func (tr *Group) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Group
+func (tr *Group) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Group
+func (tr *Group) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Group
+func (tr *Group) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Group
+func (tr *Group) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Group
+func (tr *Group) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Group using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Group) LateInitialize(attrs []byte) (bool, error) {
+	params := &GroupParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Group) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_group_types.go b/apis/organizationmanager/v1alpha1/zz_group_types.go
new file mode 100755
index 0000000..1b0a14e
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_group_types.go
@@ -0,0 +1,117 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type GroupInitParameters struct {
+
+	// The description of the Group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The name of the Group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The organization to attach this Group to.
+	OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"`
+}
+
+type GroupObservation struct {
+
+	// (Computed) The Group creation timestamp.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// The description of the Group.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// The name of the Group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The organization to attach this Group to.
+	OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"`
+}
+
+type GroupParameters struct {
+
+	// The description of the Group.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// The name of the Group.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The organization to attach this Group to.
+	// +kubebuilder:validation:Optional
+	OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"`
+}
+
+// GroupSpec defines the desired state of Group
+type GroupSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     GroupParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider GroupInitParameters `json:"initProvider,omitempty"`
+}
+
+// GroupStatus defines the observed state of Group.
+type GroupStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        GroupObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Group is the Schema for the Groups API. Allows management of a single Group within an existing Yandex.Cloud Organization.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type Group struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationId) || (has(self.initProvider) && has(self.initProvider.organizationId))",message="spec.forProvider.organizationId is a required parameter"
+	Spec   GroupSpec   `json:"spec"`
+	Status GroupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// GroupList contains a list of Groups
+type GroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Group `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	Group_Kind             = "Group"
+	Group_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Group_Kind}.String()
+	Group_KindAPIVersion   = Group_Kind + "." + CRDGroupVersion.String()
+	Group_GroupVersionKind = CRDGroupVersion.WithKind(Group_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Group{}, &GroupList{})
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_groupiammember_terraformed.go b/apis/organizationmanager/v1alpha1/zz_groupiammember_terraformed.go
new file mode 100755
index 0000000..df5cb10
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_groupiammember_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this GroupIAMMember
+func (mg *GroupIAMMember) GetTerraformResourceType() string {
+	return "yandex_organizationmanager_group_iam_member"
+}
+
+// GetConnectionDetailsMapping for this GroupIAMMember
+func (tr *GroupIAMMember) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this GroupIAMMember
+func (tr *GroupIAMMember) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this GroupIAMMember
+func (tr *GroupIAMMember) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this GroupIAMMember
+func (tr *GroupIAMMember) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this GroupIAMMember
+func (tr *GroupIAMMember) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this GroupIAMMember
+func (tr *GroupIAMMember) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this GroupIAMMember
+func (tr *GroupIAMMember) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this GroupIAMMember
+func (tr *GroupIAMMember) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
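+	// Editorial illustration, not emitted by upjet: with a hypothetical
+	// params = map[string]any{"role": "viewer"} and
+	// initParams = map[string]any{"role": "editor", "sleep_after": 5},
+	// the merge below keeps params["role"] == "viewer" (Overwrite is reset
+	// to false) and only late-fills the missing "sleep_after" key.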
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this GroupIAMMember using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *GroupIAMMember) LateInitialize(attrs []byte) (bool, error) {
+	params := &GroupIAMMemberParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *GroupIAMMember) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_groupiammember_types.go b/apis/organizationmanager/v1alpha1/zz_groupiammember_types.go
new file mode 100755
index 0000000..be5fef3
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_groupiammember_types.go
@@ -0,0 +1,139 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type GroupIAMMemberInitParameters struct {
+
+	// ID of the group to attach a policy to.
+	// +crossplane:generate:reference:type=Group
+	GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"`
+
+	// Reference to a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDRef *v1.Reference `json:"groupIdRef,omitempty" tf:"-"`
+
+	// Selector for a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDSelector *v1.Selector `json:"groupIdSelector,omitempty" tf:"-"`
+
+	// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values:
+	Member *string `json:"member,omitempty" tf:"member,omitempty"`
+
+	// The role that should be assigned.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+type GroupIAMMemberObservation struct {
+
+	// ID of the group to attach a policy to.
+	GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values:
+	Member *string `json:"member,omitempty" tf:"member,omitempty"`
+
+	// The role that should be assigned.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+type GroupIAMMemberParameters struct {
+
+	// ID of the group to attach a policy to.
+	// +crossplane:generate:reference:type=Group
+	// +kubebuilder:validation:Optional
+	GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"`
+
+	// Reference to a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDRef *v1.Reference `json:"groupIdRef,omitempty" tf:"-"`
+
+	// Selector for a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDSelector *v1.Selector `json:"groupIdSelector,omitempty" tf:"-"`
+
+	// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values:
+	// +kubebuilder:validation:Optional
+	Member *string `json:"member,omitempty" tf:"member,omitempty"`
+
+	// The role that should be assigned.
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+// GroupIAMMemberSpec defines the desired state of GroupIAMMember
+type GroupIAMMemberSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     GroupIAMMemberParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider GroupIAMMemberInitParameters `json:"initProvider,omitempty"`
+}
+
+// GroupIAMMemberStatus defines the observed state of GroupIAMMember.
+type GroupIAMMemberStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        GroupIAMMemberObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// GroupIAMMember is the Schema for the GroupIAMMembers API. Allows management of a single member for a single IAM binding on a Yandex.Cloud Organization Manager Group.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type GroupIAMMember struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter"
+	Spec   GroupIAMMemberSpec   `json:"spec"`
+	Status GroupIAMMemberStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// GroupIAMMemberList contains a list of GroupIAMMembers
+type GroupIAMMemberList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []GroupIAMMember `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	GroupIAMMember_Kind             = "GroupIAMMember"
+	GroupIAMMember_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: GroupIAMMember_Kind}.String()
+	GroupIAMMember_KindAPIVersion   = GroupIAMMember_Kind + "." + CRDGroupVersion.String()
+	GroupIAMMember_GroupVersionKind = CRDGroupVersion.WithKind(GroupIAMMember_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&GroupIAMMember{}, &GroupIAMMemberList{})
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_groupmembership_terraformed.go b/apis/organizationmanager/v1alpha1/zz_groupmembership_terraformed.go
new file mode 100755
index 0000000..e4b0e06
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_groupmembership_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this GroupMembership
+func (mg *GroupMembership) GetTerraformResourceType() string {
+	return "yandex_organizationmanager_group_membership"
+}
+
+// GetConnectionDetailsMapping for this GroupMembership
+func (tr *GroupMembership) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this GroupMembership
+func (tr *GroupMembership) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this GroupMembership
+func (tr *GroupMembership) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this GroupMembership
+func (tr *GroupMembership) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this GroupMembership
+func (tr *GroupMembership) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this GroupMembership
+func (tr *GroupMembership) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this GroupMembership
+func (tr *GroupMembership) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this GroupMembership
+func (tr *GroupMembership) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this GroupMembership using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *GroupMembership) LateInitialize(attrs []byte) (bool, error) {
+	params := &GroupMembershipParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *GroupMembership) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_groupmembership_types.go b/apis/organizationmanager/v1alpha1/zz_groupmembership_types.go
new file mode 100755
index 0000000..b4cd833
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_groupmembership_types.go
@@ -0,0 +1,124 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type GroupMembershipInitParameters struct {
+
+	// The Group to add/remove members to/from.
+	// +crossplane:generate:reference:type=Group
+	GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"`
+
+	// Reference to a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDRef *v1.Reference `json:"groupIdRef,omitempty" tf:"-"`
+
+	// Selector for a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDSelector *v1.Selector `json:"groupIdSelector,omitempty" tf:"-"`
+
+	// A set of members of the Group. Each member is represented by an id.
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+}
+
+type GroupMembershipObservation struct {
+
+	// The Group to add/remove members to/from.
+	GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// A set of members of the Group. Each member is represented by an id.
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+}
+
+type GroupMembershipParameters struct {
+
+	// The Group to add/remove members to/from.
+	// +crossplane:generate:reference:type=Group
+	// +kubebuilder:validation:Optional
+	GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"`
+
+	// Reference to a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDRef *v1.Reference `json:"groupIdRef,omitempty" tf:"-"`
+
+	// Selector for a Group to populate groupId.
+	// +kubebuilder:validation:Optional
+	GroupIDSelector *v1.Selector `json:"groupIdSelector,omitempty" tf:"-"`
+
+	// A set of members of the Group. Each member is represented by an id.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+}
+
+// GroupMembershipSpec defines the desired state of GroupMembership
+type GroupMembershipSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     GroupMembershipParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider GroupMembershipInitParameters `json:"initProvider,omitempty"`
+}
+
+// GroupMembershipStatus defines the observed state of GroupMembership.
+type GroupMembershipStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        GroupMembershipObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// GroupMembership is the Schema for the GroupMemberships API. Allows management of members of Yandex.Cloud Organization Manager Group.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type GroupMembership struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter"
+	Spec   GroupMembershipSpec   `json:"spec"`
+	Status GroupMembershipStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// GroupMembershipList contains a list of GroupMemberships
+type GroupMembershipList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []GroupMembership `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	GroupMembership_Kind             = "GroupMembership"
+	GroupMembership_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: GroupMembership_Kind}.String()
+	GroupMembership_KindAPIVersion   = GroupMembership_Kind + "." + CRDGroupVersion.String()
+	GroupMembership_GroupVersionKind = CRDGroupVersion.WithKind(GroupMembership_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&GroupMembership{}, &GroupMembershipList{})
+}
diff --git a/apis/organizationmanager/v1alpha1/zz_groupversion_info.go b/apis/organizationmanager/v1alpha1/zz_groupversion_info.go
new file mode 100755
index 0000000..7d6eda8
--- /dev/null
+++ b/apis/organizationmanager/v1alpha1/zz_groupversion_info.go
@@ -0,0 +1,28 @@
+// Code generated by upjet. DO NOT EDIT.
+
+// +kubebuilder:object:generate=true
+// +groupName=organizationmanager.yandex-cloud.upjet.crossplane.io
+// +versionName=v1alpha1
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata.
+const ( + CRDGroup = "organizationmanager.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/organizationmanager/v1alpha1/zz_organizationiambinding_terraformed.go b/apis/organizationmanager/v1alpha1/zz_organizationiambinding_terraformed.go new file mode 100755 index 0000000..c622729 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_organizationiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OrganizationIAMBinding +func (mg *OrganizationIAMBinding) GetTerraformResourceType() string { + return "yandex_organizationmanager_organization_iam_binding" +} + +// GetConnectionDetailsMapping for this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OrganizationIAMBinding +func (tr *OrganizationIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := 
tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OrganizationIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OrganizationIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &OrganizationIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OrganizationIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/organizationmanager/v1alpha1/zz_organizationiambinding_types.go b/apis/organizationmanager/v1alpha1/zz_organizationiambinding_types.go new file mode 100755 index 0000000..eabec32 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_organizationiambinding_types.go @@ -0,0 +1,124 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OrganizationIAMBindingInitParameters struct { + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the organization to attach the policy to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The role that should be assigned. Only one yandex_organizationmanager_organization_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type OrganizationIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the organization to attach the policy to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The role that should be assigned. Only one yandex_organizationmanager_organization_iam_binding can be used per role.
+ Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type OrganizationIAMBindingParameters struct { + + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the organization to attach the policy to. + // +kubebuilder:validation:Optional + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The role that should be assigned. Only one yandex_organizationmanager_organization_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// OrganizationIAMBindingSpec defines the desired state of OrganizationIAMBinding +type OrganizationIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OrganizationIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider OrganizationIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// OrganizationIAMBindingStatus defines the observed state of OrganizationIAMBinding. +type OrganizationIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OrganizationIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// OrganizationIAMBinding is the Schema for the OrganizationIAMBindings API. Allows management of a single IAM binding for a Yandex Organization Manager organization.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type OrganizationIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationId) || (has(self.initProvider) && has(self.initProvider.organizationId))",message="spec.forProvider.organizationId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec OrganizationIAMBindingSpec `json:"spec"` + Status OrganizationIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OrganizationIAMBindingList contains a list of OrganizationIAMBindings +type OrganizationIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OrganizationIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + OrganizationIAMBinding_Kind = "OrganizationIAMBinding" + OrganizationIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OrganizationIAMBinding_Kind}.String() + OrganizationIAMBinding_KindAPIVersion = OrganizationIAMBinding_Kind + "." + CRDGroupVersion.String() + OrganizationIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(OrganizationIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&OrganizationIAMBinding{}, &OrganizationIAMBindingList{}) +} diff --git a/apis/organizationmanager/v1alpha1/zz_organizationiammember_terraformed.go b/apis/organizationmanager/v1alpha1/zz_organizationiammember_terraformed.go new file mode 100755 index 0000000..e18b197 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_organizationiammember_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
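+// The methods in this file are the generated glue between the Kubernetes
+// object and Terraform: they translate OrganizationIAMMember's spec and
+// status to and from the JSON representation of the
+// yandex_organizationmanager_organization_iam_member resource.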
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OrganizationIAMMember +func (mg *OrganizationIAMMember) GetTerraformResourceType() string { + return "yandex_organizationmanager_organization_iam_member" +} + +// GetConnectionDetailsMapping for this OrganizationIAMMember +func (tr *OrganizationIAMMember) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this OrganizationIAMMember +func (tr *OrganizationIAMMember) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OrganizationIAMMember +func (tr *OrganizationIAMMember) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OrganizationIAMMember +func (tr *OrganizationIAMMember) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OrganizationIAMMember +func (tr *OrganizationIAMMember) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OrganizationIAMMember +func (tr *OrganizationIAMMember) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OrganizationIAMMember +func (tr *OrganizationIAMMember) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this OrganizationIAMMember +func (tr *OrganizationIAMMember) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OrganizationIAMMember using its observed tfState. +// returns True if there are any spec changes for the resource.
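+// Broadly, late-initialization only fills fields that are unset in
+// spec.forProvider from the observed state; values already set by the user
+// are never overwritten, and the zero-value filter configured below skips
+// empty values for omitempty fields.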
+func (tr *OrganizationIAMMember) LateInitialize(attrs []byte) (bool, error) { + params := &OrganizationIAMMemberParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OrganizationIAMMember) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/organizationmanager/v1alpha1/zz_organizationiammember_types.go b/apis/organizationmanager/v1alpha1/zz_organizationiammember_types.go new file mode 100755 index 0000000..df942a7 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_organizationiammember_types.go @@ -0,0 +1,121 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OrganizationIAMMemberInitParameters struct { + + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // ID of the organization to attach a policy to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The role that should be assigned. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type OrganizationIAMMemberObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // ID of the organization to attach a policy to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The role that should be assigned. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type OrganizationIAMMemberParameters struct { + + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + // +kubebuilder:validation:Optional + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // ID of the organization to attach a policy to. + // +kubebuilder:validation:Optional + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The role that should be assigned. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// OrganizationIAMMemberSpec defines the desired state of OrganizationIAMMember +type OrganizationIAMMemberSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OrganizationIAMMemberParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider OrganizationIAMMemberInitParameters `json:"initProvider,omitempty"` +} + +// OrganizationIAMMemberStatus defines the observed state of OrganizationIAMMember. +type OrganizationIAMMemberStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OrganizationIAMMemberObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// OrganizationIAMMember is the Schema for the OrganizationIAMMembers API. Allows management of a single member for a single IAM binding on a Yandex.Cloud Organization Manager organization. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type OrganizationIAMMember struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationId) || (has(self.initProvider) && has(self.initProvider.organizationId))",message="spec.forProvider.organizationId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec OrganizationIAMMemberSpec `json:"spec"` + Status OrganizationIAMMemberStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OrganizationIAMMemberList contains a list of OrganizationIAMMembers +type OrganizationIAMMemberList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OrganizationIAMMember `json:"items"` +} + +// Repository type metadata. +var ( + OrganizationIAMMember_Kind = "OrganizationIAMMember" + OrganizationIAMMember_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OrganizationIAMMember_Kind}.String() + OrganizationIAMMember_KindAPIVersion = OrganizationIAMMember_Kind + "."
+ CRDGroupVersion.String() + OrganizationIAMMember_GroupVersionKind = CRDGroupVersion.WithKind(OrganizationIAMMember_Kind) +) + +func init() { + SchemeBuilder.Register(&OrganizationIAMMember{}, &OrganizationIAMMemberList{}) +} diff --git a/apis/organizationmanager/v1alpha1/zz_osloginsettings_terraformed.go b/apis/organizationmanager/v1alpha1/zz_osloginsettings_terraformed.go new file mode 100755 index 0000000..362b033 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_osloginsettings_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OsLoginSettings +func (mg *OsLoginSettings) GetTerraformResourceType() string { + return "yandex_organizationmanager_os_login_settings" +} + +// GetConnectionDetailsMapping for this OsLoginSettings +func (tr *OsLoginSettings) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this OsLoginSettings +func (tr *OsLoginSettings) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OsLoginSettings +func (tr *OsLoginSettings) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OsLoginSettings +func (tr *OsLoginSettings) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OsLoginSettings +func (tr *OsLoginSettings) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OsLoginSettings +func (tr *OsLoginSettings) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OsLoginSettings +func (tr *OsLoginSettings) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this OsLoginSettings +func (tr *OsLoginSettings) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
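+ // An illustrative merge (hypothetical values, not taken from a real
+ // resource): with params = {"organization_id": "b1gexample"} and
+ // initParams = {"organization_id": "ignored", "user_ssh_key_settings":
+ // [{"enabled": true}]}, the result keeps the forProvider organization_id
+ // and gains user_ssh_key_settings, because Overwrite is reset to false.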
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OsLoginSettings using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OsLoginSettings) LateInitialize(attrs []byte) (bool, error) { + params := &OsLoginSettingsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OsLoginSettings) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/organizationmanager/v1alpha1/zz_osloginsettings_types.go b/apis/organizationmanager/v1alpha1/zz_osloginsettings_types.go new file mode 100755 index 0000000..873c440 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_osloginsettings_types.go @@ -0,0 +1,160 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OsLoginSettingsInitParameters struct { + + // The organization to manage its OsLogin Settings. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The structure is documented below. + SSHCertificateSettings []SSHCertificateSettingsInitParameters `json:"sshCertificateSettings,omitempty" tf:"ssh_certificate_settings,omitempty"` + + // The structure is documented below. + UserSSHKeySettings []UserSSHKeySettingsInitParameters `json:"userSshKeySettings,omitempty" tf:"user_ssh_key_settings,omitempty"` +} + +type OsLoginSettingsObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The organization to manage its OsLogin Settings. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The structure is documented below. + SSHCertificateSettings []SSHCertificateSettingsObservation `json:"sshCertificateSettings,omitempty" tf:"ssh_certificate_settings,omitempty"` + + // The structure is documented below. + UserSSHKeySettings []UserSSHKeySettingsObservation `json:"userSshKeySettings,omitempty" tf:"user_ssh_key_settings,omitempty"` +} + +type OsLoginSettingsParameters struct { + + // The organization to manage its OsLogin Settings. + // +kubebuilder:validation:Optional + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // The structure is documented below. + // +kubebuilder:validation:Optional + SSHCertificateSettings []SSHCertificateSettingsParameters `json:"sshCertificateSettings,omitempty" tf:"ssh_certificate_settings,omitempty"` + + // The structure is documented below.
+ // +kubebuilder:validation:Optional + UserSSHKeySettings []UserSSHKeySettingsParameters `json:"userSshKeySettings,omitempty" tf:"user_ssh_key_settings,omitempty"` +} + +type SSHCertificateSettingsInitParameters struct { + + // Enables or disables usage of ssh certificates signed by trusted CA. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SSHCertificateSettingsObservation struct { + + // Enables or disables usage of ssh certificates signed by trusted CA. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SSHCertificateSettingsParameters struct { + + // Enables or disables usage of ssh certificates signed by trusted CA. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type UserSSHKeySettingsInitParameters struct { + + // If set to true, the subject is allowed to manage their own ssh keys without having to be assigned specific permissions. + AllowManageOwnKeys *bool `json:"allowManageOwnKeys,omitempty" tf:"allow_manage_own_keys,omitempty"` + + // Enables or disables usage of ssh keys assigned to a specific subject. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type UserSSHKeySettingsObservation struct { + + // If set to true, the subject is allowed to manage their own ssh keys without having to be assigned specific permissions. + AllowManageOwnKeys *bool `json:"allowManageOwnKeys,omitempty" tf:"allow_manage_own_keys,omitempty"` + + // Enables or disables usage of ssh keys assigned to a specific subject. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type UserSSHKeySettingsParameters struct { + + // If set to true, the subject is allowed to manage their own ssh keys without having to be assigned specific permissions. + // +kubebuilder:validation:Optional + AllowManageOwnKeys *bool `json:"allowManageOwnKeys,omitempty" tf:"allow_manage_own_keys,omitempty"` + + // Enables or disables usage of ssh keys assigned to a specific subject. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +// OsLoginSettingsSpec defines the desired state of OsLoginSettings +type OsLoginSettingsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OsLoginSettingsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider OsLoginSettingsInitParameters `json:"initProvider,omitempty"` +} + +// OsLoginSettingsStatus defines the observed state of OsLoginSettings. +type OsLoginSettingsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OsLoginSettingsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// OsLoginSettings is the Schema for the OsLoginSettings API. Allows management of OsLogin Settings within an existing Yandex.Cloud Organization.
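+// The printcolumn markers below surface the Synced and Ready conditions and
+// the crossplane.io/external-name annotation as columns in kubectl get
+// output.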
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type OsLoginSettings struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationId) || (has(self.initProvider) && has(self.initProvider.organizationId))",message="spec.forProvider.organizationId is a required parameter" + Spec OsLoginSettingsSpec `json:"spec"` + Status OsLoginSettingsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OsLoginSettingsList contains a list of OsLoginSettings +type OsLoginSettingsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OsLoginSettings `json:"items"` +} + +// Repository type metadata. +var ( + OsLoginSettings_Kind = "OsLoginSettings" + OsLoginSettings_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OsLoginSettings_Kind}.String() + OsLoginSettings_KindAPIVersion = OsLoginSettings_Kind + "." + CRDGroupVersion.String() + OsLoginSettings_GroupVersionKind = CRDGroupVersion.WithKind(OsLoginSettings_Kind) +) + +func init() { + SchemeBuilder.Register(&OsLoginSettings{}, &OsLoginSettingsList{}) +} diff --git a/apis/organizationmanager/v1alpha1/zz_samlfederation_terraformed.go b/apis/organizationmanager/v1alpha1/zz_samlfederation_terraformed.go new file mode 100755 index 0000000..a5885ee --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_samlfederation_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT.
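+// A minimal illustrative manifest for the SAMLFederation kind served by this
+// group (IDs and URLs below are hypothetical placeholders):
+//
+//	apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1
+//	kind: SAMLFederation
+//	metadata:
+//	  name: example
+//	spec:
+//	  forProvider:
+//	    name: example-federation
+//	    organizationId: bpfexampleorgid
+//	    issuer: https://idp.example.com
+//	    ssoBinding: POST
+//	    ssoUrl: https://idp.example.com/sso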
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SAMLFederation +func (mg *SAMLFederation) GetTerraformResourceType() string { + return "yandex_organizationmanager_saml_federation" +} + +// GetConnectionDetailsMapping for this SAMLFederation +func (tr *SAMLFederation) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SAMLFederation +func (tr *SAMLFederation) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SAMLFederation +func (tr *SAMLFederation) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SAMLFederation +func (tr *SAMLFederation) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SAMLFederation +func (tr *SAMLFederation) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SAMLFederation +func (tr *SAMLFederation) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SAMLFederation +func (tr *SAMLFederation) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SAMLFederation +func (tr *SAMLFederation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SAMLFederation using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *SAMLFederation) LateInitialize(attrs []byte) (bool, error) { + params := &SAMLFederationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SAMLFederation) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/organizationmanager/v1alpha1/zz_samlfederation_types.go b/apis/organizationmanager/v1alpha1/zz_samlfederation_types.go new file mode 100755 index 0000000..7a65f08 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_samlfederation_types.go @@ -0,0 +1,222 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SAMLFederationInitParameters struct { + + // Add new users automatically on successful authentication. The user will get the resource-manager.clouds.member role automatically, but you need to grant other roles to them. If the value is false, users who aren't added to the cloud can't log in, even if they have authenticated on your server. + AutoCreateAccountOnLogin *bool `json:"autoCreateAccountOnLogin,omitempty" tf:"auto_create_account_on_login,omitempty"` + + // Use case-insensitive name ids. + CaseInsensitiveNameIds *bool `json:"caseInsensitiveNameIds,omitempty" tf:"case_insensitive_name_ids,omitempty"` + + // The lifetime of a Browser cookie in seconds. If the cookie is still valid, the management console authenticates the user immediately and redirects them to the home page. The default value is 8h. + CookieMaxAge *string `json:"cookieMaxAge,omitempty" tf:"cookie_max_age,omitempty"` + + // The description of the SAML Federation. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the IdP server to be used for authentication. The IdP server also responds to IAM with this ID after the user authenticates. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A set of key/value label pairs assigned to the SAML Federation. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the SAML Federation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The organization to attach this SAML Federation to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // Federation security settings, structure is documented below. + SecuritySettings []SecuritySettingsInitParameters `json:"securitySettings,omitempty" tf:"security_settings,omitempty"` + + // Single sign-on endpoint binding type. Most Identity Providers support the POST binding type. SAML Binding is a mapping of a SAML protocol message onto standard messaging formats and/or communications protocols. + SsoBinding *string `json:"ssoBinding,omitempty" tf:"sso_binding,omitempty"` + + // Single sign-on endpoint URL. Specify the link to the IdP login page here. 
+ SsoURL *string `json:"ssoUrl,omitempty" tf:"sso_url,omitempty"` +} + +type SAMLFederationObservation struct { + + // Add new users automatically on successful authentication. The user will get the resource-manager.clouds.member role automatically, but you need to grant other roles to them. If the value is false, users who aren't added to the cloud can't log in, even if they have authenticated on your server. + AutoCreateAccountOnLogin *bool `json:"autoCreateAccountOnLogin,omitempty" tf:"auto_create_account_on_login,omitempty"` + + // Use case-insensitive name ids. + CaseInsensitiveNameIds *bool `json:"caseInsensitiveNameIds,omitempty" tf:"case_insensitive_name_ids,omitempty"` + + // The lifetime of a Browser cookie in seconds. If the cookie is still valid, the management console authenticates the user immediately and redirects them to the home page. The default value is 8h. + CookieMaxAge *string `json:"cookieMaxAge,omitempty" tf:"cookie_max_age,omitempty"` + + // (Computed) The SAML Federation creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The description of the SAML Federation. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ID of the IdP server to be used for authentication. The IdP server also responds to IAM with this ID after the user authenticates. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A set of key/value label pairs assigned to the SAML Federation. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the SAML Federation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The organization to attach this SAML Federation to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // Federation security settings, structure is documented below. + SecuritySettings []SecuritySettingsObservation `json:"securitySettings,omitempty" tf:"security_settings,omitempty"` + + // Single sign-on endpoint binding type. Most Identity Providers support the POST binding type. SAML Binding is a mapping of a SAML protocol message onto standard messaging formats and/or communications protocols. + SsoBinding *string `json:"ssoBinding,omitempty" tf:"sso_binding,omitempty"` + + // Single sign-on endpoint URL. Specify the link to the IdP login page here. + SsoURL *string `json:"ssoUrl,omitempty" tf:"sso_url,omitempty"` +} + +type SAMLFederationParameters struct { + + // Add new users automatically on successful authentication. The user will get the resource-manager.clouds.member role automatically, but you need to grant other roles to them. If the value is false, users who aren't added to the cloud can't log in, even if they have authenticated on your server. + // +kubebuilder:validation:Optional + AutoCreateAccountOnLogin *bool `json:"autoCreateAccountOnLogin,omitempty" tf:"auto_create_account_on_login,omitempty"` + + // Use case-insensitive name ids. + // +kubebuilder:validation:Optional + CaseInsensitiveNameIds *bool `json:"caseInsensitiveNameIds,omitempty" tf:"case_insensitive_name_ids,omitempty"` + + // The lifetime of a Browser cookie in seconds. If the cookie is still valid, the management console authenticates the user immediately and redirects them to the home page. The default value is 8h. 
+ // +kubebuilder:validation:Optional + CookieMaxAge *string `json:"cookieMaxAge,omitempty" tf:"cookie_max_age,omitempty"` + + // The description of the SAML Federation. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the IdP server to be used for authentication. The IdP server also responds to IAM with this ID after the user authenticates. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A set of key/value label pairs assigned to the SAML Federation. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the SAML Federation. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The organization to attach this SAML Federation to. + // +kubebuilder:validation:Optional + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // Federation security settings, structure is documented below. + // +kubebuilder:validation:Optional + SecuritySettings []SecuritySettingsParameters `json:"securitySettings,omitempty" tf:"security_settings,omitempty"` + + // Single sign-on endpoint binding type. Most Identity Providers support the POST binding type. SAML Binding is a mapping of a SAML protocol message onto standard messaging formats and/or communications protocols. + // +kubebuilder:validation:Optional + SsoBinding *string `json:"ssoBinding,omitempty" tf:"sso_binding,omitempty"` + + // Single sign-on endpoint URL. Specify the link to the IdP login page here. + // +kubebuilder:validation:Optional + SsoURL *string `json:"ssoUrl,omitempty" tf:"sso_url,omitempty"` +} + +type SecuritySettingsInitParameters struct { + + // Enable encrypted assertions. + EncryptedAssertions *bool `json:"encryptedAssertions,omitempty" tf:"encrypted_assertions,omitempty"` +} + +type SecuritySettingsObservation struct { + + // Enable encrypted assertions. + EncryptedAssertions *bool `json:"encryptedAssertions,omitempty" tf:"encrypted_assertions,omitempty"` +} + +type SecuritySettingsParameters struct { + + // Enable encrypted assertions. + // +kubebuilder:validation:Optional + EncryptedAssertions *bool `json:"encryptedAssertions" tf:"encrypted_assertions,omitempty"` +} + +// SAMLFederationSpec defines the desired state of SAMLFederation +type SAMLFederationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SAMLFederationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider SAMLFederationInitParameters `json:"initProvider,omitempty"` +} + +// SAMLFederationStatus defines the observed state of SAMLFederation.
+type SAMLFederationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SAMLFederationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SAMLFederation is the Schema for the SAMLFederations API. Allows management of a single SAML Federation within an existing Yandex.Cloud Organization. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SAMLFederation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.issuer) || (has(self.initProvider) && has(self.initProvider.issuer))",message="spec.forProvider.issuer is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationId) || (has(self.initProvider) && has(self.initProvider.organizationId))",message="spec.forProvider.organizationId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ssoBinding) || (has(self.initProvider) && has(self.initProvider.ssoBinding))",message="spec.forProvider.ssoBinding is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ssoUrl) || (has(self.initProvider) && has(self.initProvider.ssoUrl))",message="spec.forProvider.ssoUrl is a required parameter" + Spec SAMLFederationSpec `json:"spec"` + Status SAMLFederationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SAMLFederationList contains a list of SAMLFederations +type SAMLFederationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SAMLFederation `json:"items"` +} + +// Repository type metadata. +var ( + SAMLFederation_Kind = "SAMLFederation" + SAMLFederation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SAMLFederation_Kind}.String() + SAMLFederation_KindAPIVersion = SAMLFederation_Kind + "." 
+ CRDGroupVersion.String() + SAMLFederation_GroupVersionKind = CRDGroupVersion.WithKind(SAMLFederation_Kind) +) + +func init() { + SchemeBuilder.Register(&SAMLFederation{}, &SAMLFederationList{}) +} diff --git a/apis/organizationmanager/v1alpha1/zz_samlfederationuseraccount_terraformed.go b/apis/organizationmanager/v1alpha1/zz_samlfederationuseraccount_terraformed.go new file mode 100755 index 0000000..07990d9 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_samlfederationuseraccount_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SAMLFederationUserAccount +func (mg *SAMLFederationUserAccount) GetTerraformResourceType() string { + return "yandex_organizationmanager_saml_federation_user_account" +} + +// GetConnectionDetailsMapping for this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SAMLFederationUserAccount +func (tr *SAMLFederationUserAccount) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider.
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SAMLFederationUserAccount using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SAMLFederationUserAccount) LateInitialize(attrs []byte) (bool, error) { + params := &SAMLFederationUserAccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SAMLFederationUserAccount) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/organizationmanager/v1alpha1/zz_samlfederationuseraccount_types.go b/apis/organizationmanager/v1alpha1/zz_samlfederationuseraccount_types.go new file mode 100755 index 0000000..f30ad14 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_samlfederationuseraccount_types.go @@ -0,0 +1,104 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SAMLFederationUserAccountInitParameters struct { + + // ID of a SAML Federation. + FederationID *string `json:"federationId,omitempty" tf:"federation_id,omitempty"` + + // Name ID of the SAML federated user. + NameID *string `json:"nameId,omitempty" tf:"name_id,omitempty"` +} + +type SAMLFederationUserAccountObservation struct { + + // ID of a SAML Federation. + FederationID *string `json:"federationId,omitempty" tf:"federation_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name ID of the SAML federated user. + NameID *string `json:"nameId,omitempty" tf:"name_id,omitempty"` +} + +type SAMLFederationUserAccountParameters struct { + + // ID of a SAML Federation. + // +kubebuilder:validation:Optional + FederationID *string `json:"federationId,omitempty" tf:"federation_id,omitempty"` + + // Name ID of the SAML federated user. + // +kubebuilder:validation:Optional + NameID *string `json:"nameId,omitempty" tf:"name_id,omitempty"` +} + +// SAMLFederationUserAccountSpec defines the desired state of SAMLFederationUserAccount +type SAMLFederationUserAccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SAMLFederationUserAccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation.
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider SAMLFederationUserAccountInitParameters `json:"initProvider,omitempty"` +} + +// SAMLFederationUserAccountStatus defines the observed state of SAMLFederationUserAccount. +type SAMLFederationUserAccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SAMLFederationUserAccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SAMLFederationUserAccount is the Schema for the SAMLFederationUserAccounts API. Allows management of a single SAML Federation user account within an existing Yandex.Cloud Organization. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SAMLFederationUserAccount struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.federationId) || (has(self.initProvider) && has(self.initProvider.federationId))",message="spec.forProvider.federationId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nameId) || (has(self.initProvider) && has(self.initProvider.nameId))",message="spec.forProvider.nameId is a required parameter" + Spec SAMLFederationUserAccountSpec `json:"spec"` + Status SAMLFederationUserAccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SAMLFederationUserAccountList contains a list of SAMLFederationUserAccounts +type SAMLFederationUserAccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SAMLFederationUserAccount `json:"items"` +} + +// Repository type metadata. +var ( + SAMLFederationUserAccount_Kind = "SAMLFederationUserAccount" + SAMLFederationUserAccount_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SAMLFederationUserAccount_Kind}.String() + SAMLFederationUserAccount_KindAPIVersion = SAMLFederationUserAccount_Kind + "." + CRDGroupVersion.String() + SAMLFederationUserAccount_GroupVersionKind = CRDGroupVersion.WithKind(SAMLFederationUserAccount_Kind) +) + +func init() { + SchemeBuilder.Register(&SAMLFederationUserAccount{}, &SAMLFederationUserAccountList{}) +} diff --git a/apis/organizationmanager/v1alpha1/zz_usersshkey_terraformed.go b/apis/organizationmanager/v1alpha1/zz_usersshkey_terraformed.go new file mode 100755 index 0000000..732e911 --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_usersshkey_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT.
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this UserSSHKey +func (mg *UserSSHKey) GetTerraformResourceType() string { + return "yandex_organizationmanager_user_ssh_key" +} + +// GetConnectionDetailsMapping for this UserSSHKey +func (tr *UserSSHKey) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this UserSSHKey +func (tr *UserSSHKey) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this UserSSHKey +func (tr *UserSSHKey) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this UserSSHKey +func (tr *UserSSHKey) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this UserSSHKey +func (tr *UserSSHKey) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this UserSSHKey +func (tr *UserSSHKey) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this UserSSHKey +func (tr *UserSSHKey) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this UserSSHKey +func (tr *UserSSHKey) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this UserSSHKey using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *UserSSHKey) LateInitialize(attrs []byte) (bool, error) { + params := &UserSSHKeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *UserSSHKey) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/organizationmanager/v1alpha1/zz_usersshkey_types.go b/apis/organizationmanager/v1alpha1/zz_usersshkey_types.go new file mode 100755 index 0000000..cac389d --- /dev/null +++ b/apis/organizationmanager/v1alpha1/zz_usersshkey_types.go @@ -0,0 +1,138 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type UserSSHKeyInitParameters struct { + + // Data of the user ssh key. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // User ssh key will be no longer valid after expiration timestamp. + ExpiresAt *string `json:"expiresAt,omitempty" tf:"expires_at,omitempty"` + + // Name of the user ssh key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Organization that the user ssh key belongs to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // Subject that the user ssh key belongs to. + SubjectID *string `json:"subjectId,omitempty" tf:"subject_id,omitempty"` +} + +type UserSSHKeyObservation struct { + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Data of the user ssh key. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // User ssh key will be no longer valid after expiration timestamp. + ExpiresAt *string `json:"expiresAt,omitempty" tf:"expires_at,omitempty"` + + Fingerprint *string `json:"fingerprint,omitempty" tf:"fingerprint,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the user ssh key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Organization that the user ssh key belongs to. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // Subject that the user ssh key belongs to. + SubjectID *string `json:"subjectId,omitempty" tf:"subject_id,omitempty"` +} + +type UserSSHKeyParameters struct { + + // Data of the user ssh key. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // User ssh key will be no longer valid after expiration timestamp. + // +kubebuilder:validation:Optional + ExpiresAt *string `json:"expiresAt,omitempty" tf:"expires_at,omitempty"` + + // Name of the user ssh key. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Organization that the user ssh key belongs to. + // +kubebuilder:validation:Optional + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + + // Subject that the user ssh key belongs to. 
+ // +kubebuilder:validation:Optional + SubjectID *string `json:"subjectId,omitempty" tf:"subject_id,omitempty"` +} + +// UserSSHKeySpec defines the desired state of UserSSHKey +type UserSSHKeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserSSHKeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserSSHKeyInitParameters `json:"initProvider,omitempty"` +} + +// UserSSHKeyStatus defines the observed state of UserSSHKey. +type UserSSHKeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserSSHKeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// UserSSHKey is the Schema for the UserSSHKeys API. Allows management of User Ssh Keys within an existing Yandex.Cloud Organization and Subject. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type UserSSHKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.data) || (has(self.initProvider) && has(self.initProvider.data))",message="spec.forProvider.data is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationId) || (has(self.initProvider) && has(self.initProvider.organizationId))",message="spec.forProvider.organizationId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subjectId) || (has(self.initProvider) && has(self.initProvider.subjectId))",message="spec.forProvider.subjectId is a required parameter" + Spec UserSSHKeySpec `json:"spec"` + Status UserSSHKeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserSSHKeyList contains a list of UserSSHKeys +type UserSSHKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UserSSHKey `json:"items"` +} + +// Repository type metadata. 
+var ( + UserSSHKey_Kind = "UserSSHKey" + UserSSHKey_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: UserSSHKey_Kind}.String() + UserSSHKey_KindAPIVersion = UserSSHKey_Kind + "." + CRDGroupVersion.String() + UserSSHKey_GroupVersionKind = CRDGroupVersion.WithKind(UserSSHKey_Kind) +) + +func init() { + SchemeBuilder.Register(&UserSSHKey{}, &UserSSHKeyList{}) +} diff --git a/apis/resourcemanager/v1alpha1/zz_cloud_terraformed.go b/apis/resourcemanager/v1alpha1/zz_cloud_terraformed.go index 6da9b4f..d7cd700 100755 --- a/apis/resourcemanager/v1alpha1/zz_cloud_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_cloud_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Cloud func (mg *Cloud) GetTerraformResourceType() string { - return "yandex_resourcemanager_cloud" + return "yandex_resourcemanager_cloud" } // GetConnectionDetailsMapping for this Cloud func (tr *Cloud) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Cloud func (tr *Cloud) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Cloud func (tr *Cloud) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Cloud func (tr *Cloud) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Cloud func (tr *Cloud) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Cloud func (tr *Cloud) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Cloud func (tr *Cloud) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Cloud func 
(tr *Cloud) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Cloud using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Cloud) LateInitialize(attrs []byte) (bool, error) { - params := &CloudParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &CloudParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Cloud) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_cloud_types.go b/apis/resourcemanager/v1alpha1/zz_cloud_types.go index f80b35d..ae6f561 100755 --- a/apis/resourcemanager/v1alpha1/zz_cloud_types.go +++ b/apis/resourcemanager/v1alpha1/zz_cloud_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT.
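// Editor's note (illustrative addition, not part of the generated diff): the
// Get/SetObservation and Get/SetParameters accessors in the terraformed files
// of this diff all follow the same round-trip: marshal a typed struct, then
// unmarshal the bytes into a map[string]any. upjet's json.TFParser
// additionally honors the tf struct tags; the hedged, dependency-free sketch
// below uses plain encoding/json only to illustrate the struct-to-map shape.

package main

import (
	"encoding/json"
	"fmt"
)

// cloudParams stands in for a generated parameters struct.
type cloudParams struct {
	Name        *string `json:"name,omitempty"`
	Description *string `json:"description,omitempty"`
}

// toMap mirrors the GetParameters/GetObservation pattern above.
func toMap(v any) (map[string]any, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.Unmarshal(b, &base)
}

func main() {
	name := "my-cloud"
	m, err := toMap(cloudParams{Name: &name})
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // map[name:my-cloud]; unset pointer fields are omitted
}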
package v1alpha1 @@ -9,77 +7,67 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type CloudInitParameters struct { + // A description of the Cloud. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A description of the Cloud. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// A set of key/value label pairs to assign to the Cloud. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Cloud. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the Cloud. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the Cloud. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Yandex.Cloud Organization that the cloud belongs to. If value is omitted, the default provider Organization ID is used. -OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + // Yandex.Cloud Organization that the cloud belongs to. If value is omitted, the default provider Organization ID is used. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` } - type CloudObservation struct { + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // A description of the Cloud. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` - -// A description of the Cloud. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Cloud. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Cloud. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the Cloud. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the Cloud. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Yandex.Cloud Organization that the cloud belongs to. If value is omitted, the default provider Organization ID is used. -OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + // Yandex.Cloud Organization that the cloud belongs to. If value is omitted, the default provider Organization ID is used. + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` } - type CloudParameters struct { + // A description of the Cloud. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A description of the Cloud. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// A set of key/value label pairs to assign to the Cloud. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Cloud. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the Cloud. 
-// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the Cloud. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Yandex.Cloud Organization that the cloud belongs to. If value is omitted, the default provider Organization ID is used. -// +kubebuilder:validation:Optional -OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` + // Yandex.Cloud Organization that the cloud belongs to. If value is omitted, the default provider Organization ID is used. + // +kubebuilder:validation:Optional + OrganizationID *string `json:"organizationId,omitempty" tf:"organization_id,omitempty"` } // CloudSpec defines the desired state of Cloud type CloudSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider CloudParameters `json:"forProvider"` + ForProvider CloudParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -90,20 +78,19 @@ type CloudSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider CloudInitParameters `json:"initProvider,omitempty"` + InitProvider CloudInitParameters `json:"initProvider,omitempty"` } // CloudStatus defines the observed state of Cloud. type CloudStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider CloudObservation `json:"atProvider,omitempty"` + AtProvider CloudObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Cloud is the Schema for the Clouds API. Allows management of the Cloud resource. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/resourcemanager/v1alpha1/zz_cloudiambinding_terraformed.go b/apis/resourcemanager/v1alpha1/zz_cloudiambinding_terraformed.go index b66dc85..03a5a30 100755 --- a/apis/resourcemanager/v1alpha1/zz_cloudiambinding_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_cloudiambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
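// Editor's note (illustrative addition, not part of the generated diff): the
// LateInitialize methods generated for each kind in this diff fill in only
// those spec.forProvider fields the user left unset, using values observed in
// the Terraform state, and report whether the spec changed. A hedged,
// dependency-free sketch of that rule for pointer fields; the real
// implementation delegates to upjet's GenericLateInitializer.

package main

import "fmt"

// lateInitField copies observed into desired only when desired is unset.
func lateInitField(desired **string, observed *string) bool {
	if *desired == nil && observed != nil {
		v := *observed
		*desired = &v
		return true // spec changed and should be persisted
	}
	return false
}

// folderParams is a hypothetical stand-in for a generated parameters struct.
type folderParams struct {
	Name        *string
	Description *string
}

func main() {
	name := "user-set-name"
	obsName, obsDesc := "observed-name", "observed-description"

	spec := folderParams{Name: &name}
	observed := folderParams{Name: &obsName, Description: &obsDesc}

	changed := lateInitField(&spec.Description, observed.Description)
	changed = lateInitField(&spec.Name, observed.Name) || changed

	// Prints: true user-set-name observed-description
	fmt.Println(changed, *spec.Name, *spec.Description)
}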
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this CloudIAMBinding func (mg *CloudIAMBinding) GetTerraformResourceType() string { - return "yandex_resourcemanager_cloud_iam_binding" + return "yandex_resourcemanager_cloud_iam_binding" } // GetConnectionDetailsMapping for this CloudIAMBinding func (tr *CloudIAMBinding) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this CloudIAMBinding func (tr *CloudIAMBinding) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this CloudIAMBinding func (tr *CloudIAMBinding) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this CloudIAMBinding func (tr *CloudIAMBinding) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this CloudIAMBinding func (tr *CloudIAMBinding) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this CloudIAMBinding func (tr *CloudIAMBinding) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this CloudIAMBinding func (tr *CloudIAMBinding) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this CloudIAMBinding func (tr *CloudIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this CloudIAMBinding using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *CloudIAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &CloudIAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &CloudIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *CloudIAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_cloudiambinding_types.go b/apis/resourcemanager/v1alpha1/zz_cloudiambinding_types.go index 0dd8759..0528327 100755 --- a/apis/resourcemanager/v1alpha1/zz_cloudiambinding_types.go +++ b/apis/resourcemanager/v1alpha1/zz_cloudiambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,90 +7,81 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type CloudIAMBindingInitParameters struct { + // ID of the cloud to attach the policy to. + // +crossplane:generate:reference:type=Cloud + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the cloud to attach the policy to.
-// +crossplane:generate:reference:type=Cloud -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // Reference to a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` -// Reference to a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` + // Selector for a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` -// Selector for a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding can be used per role. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type CloudIAMBindingObservation struct { + // ID of the cloud to attach the policy to. + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the cloud to attach the policy to. -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding can be used per role. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type CloudIAMBindingParameters struct { + // ID of the cloud to attach the policy to. + // +crossplane:generate:reference:type=Cloud + // +kubebuilder:validation:Optional + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the cloud to attach the policy to. -// +crossplane:generate:reference:type=Cloud -// +kubebuilder:validation:Optional -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` - -// Reference to a Cloud to populate cloudId. 
-// +kubebuilder:validation:Optional -CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` + // Reference to a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` -// Selector for a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` + // Selector for a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` -// An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: -// +kubebuilder:validation:Optional -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // An array of identities that will be granted the privilege in the role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding can be used per role. -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // CloudIAMBindingSpec defines the desired state of CloudIAMBinding type CloudIAMBindingSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider CloudIAMBindingParameters `json:"forProvider"` + ForProvider CloudIAMBindingParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -103,20 +92,19 @@ type CloudIAMBindingSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider CloudIAMBindingInitParameters `json:"initProvider,omitempty"` + InitProvider CloudIAMBindingInitParameters `json:"initProvider,omitempty"` } // CloudIAMBindingStatus defines the observed state of CloudIAMBinding. type CloudIAMBindingStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider CloudIAMBindingObservation `json:"atProvider,omitempty"` + AtProvider CloudIAMBindingObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // CloudIAMBinding is the Schema for the CloudIAMBindings API. Allows management of a single IAM binding for a Yandex Resource Manager cloud. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -126,10 +114,10 @@ type CloudIAMBindingStatus struct { type CloudIAMBinding struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec CloudIAMBindingSpec `json:"spec"` - Status CloudIAMBindingStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec CloudIAMBindingSpec `json:"spec"` + Status CloudIAMBindingStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/resourcemanager/v1alpha1/zz_cloudiammember_terraformed.go b/apis/resourcemanager/v1alpha1/zz_cloudiammember_terraformed.go index 44a3ba3..528f2bb 100755 --- a/apis/resourcemanager/v1alpha1/zz_cloudiammember_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_cloudiammember_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
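// Editor's note (illustrative addition, not part of the generated diff): the
// XValidation rules on CloudIAMBinding above share one shape: a field is
// required only when the management policies let the provider create or
// update the resource. A hedged Go restatement of the CEL predicate follows;
// the names are illustrative, not provider API.

package main

import "fmt"

// requiredSatisfied mirrors the CEL rule
// !('*' in policies || 'Create' in policies || 'Update' in policies)
//     || has(forProvider.field) || (has(initProvider) && has(initProvider.field))
func requiredSatisfied(policies []string, inForProvider, inInitProvider bool) bool {
	writes := false
	for _, p := range policies {
		if p == "*" || p == "Create" || p == "Update" {
			writes = true
			break
		}
	}
	return !writes || inForProvider || inInitProvider
}

func main() {
	fmt.Println(requiredSatisfied([]string{"Observe"}, false, false)) // true: observe-only, field may be absent
	fmt.Println(requiredSatisfied([]string{"*"}, false, false))       // false: the API server would reject the object
	fmt.Println(requiredSatisfied([]string{"Create"}, false, true))   // true: satisfied via initProvider
}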
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this CloudIAMMember func (mg *CloudIAMMember) GetTerraformResourceType() string { - return "yandex_resourcemanager_cloud_iam_member" + return "yandex_resourcemanager_cloud_iam_member" } // GetConnectionDetailsMapping for this CloudIAMMember func (tr *CloudIAMMember) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this CloudIAMMember func (tr *CloudIAMMember) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this CloudIAMMember func (tr *CloudIAMMember) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this CloudIAMMember func (tr *CloudIAMMember) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this CloudIAMMember func (tr *CloudIAMMember) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this CloudIAMMember func (tr *CloudIAMMember) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this CloudIAMMember func (tr *CloudIAMMember) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this CloudIAMMember func (tr *CloudIAMMember) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to 
merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this CloudIAMMember using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *CloudIAMMember) LateInitialize(attrs []byte) (bool, error) { - params := &CloudIAMMemberParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &CloudIAMMemberParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *CloudIAMMember) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_cloudiammember_types.go b/apis/resourcemanager/v1alpha1/zz_cloudiammember_types.go index 0a015e0..aef3af9 100755 --- a/apis/resourcemanager/v1alpha1/zz_cloudiammember_types.go +++ b/apis/resourcemanager/v1alpha1/zz_cloudiammember_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,87 +7,78 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type CloudIAMMemberInitParameters struct { + // ID of the cloud to attach a policy to. + // +crossplane:generate:reference:type=Cloud + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the cloud to attach a policy to.
-// +crossplane:generate:reference:type=Cloud -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // Reference to a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` -// Reference to a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` + // Selector for a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` -// Selector for a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` -// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: -Member *string `json:"member,omitempty" tf:"member,omitempty"` + // The role that should be assigned. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be assigned. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type CloudIAMMemberObservation struct { + // ID of the cloud to attach a policy to. + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the cloud to attach a policy to. -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: -Member *string `json:"member,omitempty" tf:"member,omitempty"` + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` -// The role that should be assigned. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type CloudIAMMemberParameters struct { + // ID of the cloud to attach a policy to. + // +crossplane:generate:reference:type=Cloud + // +kubebuilder:validation:Optional + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the cloud to attach a policy to. -// +crossplane:generate:reference:type=Cloud -// +kubebuilder:validation:Optional -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` - -// Reference to a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` + // Reference to a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` -// Selector for a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` + // Selector for a Cloud to populate cloudId. 
+ // +kubebuilder:validation:Optional + CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` -// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: -// +kubebuilder:validation:Optional -Member *string `json:"member,omitempty" tf:"member,omitempty"` + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + // +kubebuilder:validation:Optional + Member *string `json:"member,omitempty" tf:"member,omitempty"` -// The role that should be assigned. -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // CloudIAMMemberSpec defines the desired state of CloudIAMMember type CloudIAMMemberSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider CloudIAMMemberParameters `json:"forProvider"` + ForProvider CloudIAMMemberParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -100,20 +89,19 @@ type CloudIAMMemberSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider CloudIAMMemberInitParameters `json:"initProvider,omitempty"` + InitProvider CloudIAMMemberInitParameters `json:"initProvider,omitempty"` } // CloudIAMMemberStatus defines the observed state of CloudIAMMember. type CloudIAMMemberStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider CloudIAMMemberObservation `json:"atProvider,omitempty"` + AtProvider CloudIAMMemberObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // CloudIAMMember is the Schema for the CloudIAMMembers API. Allows management of a single member for a single IAM binding on a Yandex Resource Manager cloud. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -123,10 +111,10 @@ type CloudIAMMemberStatus struct { type CloudIAMMember struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec CloudIAMMemberSpec `json:"spec"` - Status CloudIAMMemberStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec CloudIAMMemberSpec `json:"spec"` + Status CloudIAMMemberStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/resourcemanager/v1alpha1/zz_folder_terraformed.go b/apis/resourcemanager/v1alpha1/zz_folder_terraformed.go index 3ca7cf8..2e28781 100755 --- a/apis/resourcemanager/v1alpha1/zz_folder_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_folder_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
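// Editor's note (illustrative addition, not part of the generated diff):
// every GetMergedParameters in this diff merges spec.initProvider into
// spec.forProvider with the same mergo option combination. As the generated
// Note(lsviben) comment explains, WithSliceDeepCopy enables deep slice
// copying but also flips Overwrite to true, so the trailing option resets it,
// ensuring forProvider values always win. A minimal standalone demonstration
// with hypothetical map contents:

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	params := map[string]any{"name": "from-forProvider"}
	initParams := map[string]any{
		"name":   "from-initProvider", // must not win: Overwrite is reset to false
		"labels": map[string]any{"team": "platform"},
	}

	// Same option combination as the generated GetMergedParameters methods.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(params["name"])   // from-forProvider
	fmt.Println(params["labels"]) // map[team:platform]
}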
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Folder func (mg *Folder) GetTerraformResourceType() string { - return "yandex_resourcemanager_folder" + return "yandex_resourcemanager_folder" } // GetConnectionDetailsMapping for this Folder func (tr *Folder) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Folder func (tr *Folder) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Folder func (tr *Folder) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Folder func (tr *Folder) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Folder func (tr *Folder) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Folder func (tr *Folder) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Folder func (tr *Folder) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this Folder func (tr *Folder) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. 
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Folder using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Folder) LateInitialize(attrs []byte) (bool, error) { - params := &FolderParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &FolderParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Folder) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_folder_types.go b/apis/resourcemanager/v1alpha1/zz_folder_types.go index e4fd27e..8c05475 100755 --- a/apis/resourcemanager/v1alpha1/zz_folder_types.go +++ b/apis/resourcemanager/v1alpha1/zz_folder_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,95 +7,86 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type FolderInitParameters struct { + // Cloud that the resource belongs to. If value is omitted, the default provider Cloud ID is used. + // +crossplane:generate:reference:type=Cloud + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// Cloud that the resource belongs to. If value is omitted, the default provider Cloud ID is used.
-// +crossplane:generate:reference:type=Cloud -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // Reference to a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` -// Reference to a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` + // Selector for a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` -// Selector for a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` + // A description of the Folder. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A description of the Folder. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A set of key/value label pairs to assign to the Folder. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the Folder. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` - -// The name of the Folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the Folder. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type FolderObservation struct { + // Cloud that the resource belongs to. If value is omitted, the default provider Cloud ID is used. + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// Cloud that the resource belongs to. If value is omitted, the default provider Cloud ID is used. -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` - -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// A description of the Folder. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A description of the Folder. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// A set of key/value label pairs to assign to the Folder. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Folder. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the Folder. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the Folder. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } - type FolderParameters struct { + // Cloud that the resource belongs to. If value is omitted, the default provider Cloud ID is used. + // +crossplane:generate:reference:type=Cloud + // +kubebuilder:validation:Optional + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// Cloud that the resource belongs to. If value is omitted, the default provider Cloud ID is used. -// +crossplane:generate:reference:type=Cloud -// +kubebuilder:validation:Optional -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` - -// Reference to a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` + // Reference to a Cloud to populate cloudId. 
+ // +kubebuilder:validation:Optional + CloudIDRef *v1.Reference `json:"cloudIdRef,omitempty" tf:"-"` -// Selector for a Cloud to populate cloudId. -// +kubebuilder:validation:Optional -CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` + // Selector for a Cloud to populate cloudId. + // +kubebuilder:validation:Optional + CloudIDSelector *v1.Selector `json:"cloudIdSelector,omitempty" tf:"-"` -// A description of the Folder. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // A description of the Folder. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// A set of key/value label pairs to assign to the Folder. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Folder. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// The name of the Folder. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The name of the Folder. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // FolderSpec defines the desired state of Folder type FolderSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider FolderParameters `json:"forProvider"` + ForProvider FolderParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -108,20 +97,19 @@ type FolderSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider FolderInitParameters `json:"initProvider,omitempty"` + InitProvider FolderInitParameters `json:"initProvider,omitempty"` } // FolderStatus defines the observed state of Folder. type FolderStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider FolderObservation `json:"atProvider,omitempty"` + AtProvider FolderObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Folder is the Schema for the Folders API. Allows management of the Cloud Folder. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" diff --git a/apis/resourcemanager/v1alpha1/zz_folderiambinding_terraformed.go b/apis/resourcemanager/v1alpha1/zz_folderiambinding_terraformed.go index 6f7c503..1460f6a 100755 --- a/apis/resourcemanager/v1alpha1/zz_folderiambinding_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_folderiambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this FolderIAMBinding func (mg *FolderIAMBinding) GetTerraformResourceType() string { - return "yandex_resourcemanager_folder_iam_binding" + return "yandex_resourcemanager_folder_iam_binding" } // GetConnectionDetailsMapping for this FolderIAMBinding func (tr *FolderIAMBinding) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this FolderIAMBinding func (tr *FolderIAMBinding) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this FolderIAMBinding func (tr *FolderIAMBinding) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this FolderIAMBinding func (tr *FolderIAMBinding) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this FolderIAMBinding func (tr *FolderIAMBinding) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this FolderIAMBinding func (tr *FolderIAMBinding) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this FolderIAMBinding func (tr *FolderIAMBinding) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this FolderIAMBinding func (tr *FolderIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this FolderIAMBinding using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *FolderIAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &FolderIAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &FolderIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *FolderIAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_folderiambinding_types.go b/apis/resourcemanager/v1alpha1/zz_folderiambinding_types.go index 51a7d10..403a15f 100755 --- a/apis/resourcemanager/v1alpha1/zz_folderiambinding_types.go +++ b/apis/resourcemanager/v1alpha1/zz_folderiambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,90 +7,81 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type FolderIAMBindingInitParameters struct { + // ID of the folder to attach a policy to. + // +crossplane:generate:reference:type=Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to attach a policy to.
-// +crossplane:generate:reference:type=Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // An array of identities that will be granted the privilege that is specified in the role field. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// An array of identities that will be granted the privilege that is specified in the role field. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding can be used per role. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type FolderIAMBindingObservation struct { + // ID of the folder to attach a policy to. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to attach a policy to. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// An array of identities that will be granted the privilege that is specified in the role field. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // An array of identities that will be granted the privilege that is specified in the role field. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding can be used per role. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type FolderIAMBindingParameters struct { + // ID of the folder to attach a policy to. + // +crossplane:generate:reference:type=Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to attach a policy to. 
-// +crossplane:generate:reference:type=Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// An array of identities that will be granted the privilege that is specified in the role field. Each entry can have one of the following values: -// +kubebuilder:validation:Optional -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // An array of identities that will be granted the privilege that is specified in the role field. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding can be used per role. -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // FolderIAMBindingSpec defines the desired state of FolderIAMBinding type FolderIAMBindingSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider FolderIAMBindingParameters `json:"forProvider"` + ForProvider FolderIAMBindingParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -103,20 +92,19 @@ type FolderIAMBindingSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider FolderIAMBindingInitParameters `json:"initProvider,omitempty"` + InitProvider FolderIAMBindingInitParameters `json:"initProvider,omitempty"` } // FolderIAMBindingStatus defines the observed state of FolderIAMBinding. type FolderIAMBindingStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider FolderIAMBindingObservation `json:"atProvider,omitempty"` + AtProvider FolderIAMBindingObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // FolderIAMBinding is the Schema for the FolderIAMBindings API. Allows management of a single IAM binding for a Yandex Resource Manager folder. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -126,10 +114,10 @@ type FolderIAMBindingStatus struct { type FolderIAMBinding struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec FolderIAMBindingSpec `json:"spec"` - Status FolderIAMBindingStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec FolderIAMBindingSpec `json:"spec"` + Status FolderIAMBindingStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/resourcemanager/v1alpha1/zz_folderiammember_terraformed.go b/apis/resourcemanager/v1alpha1/zz_folderiammember_terraformed.go index 9036160..ae5c173 100755 --- a/apis/resourcemanager/v1alpha1/zz_folderiammember_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_folderiammember_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this FolderIAMMember func (mg *FolderIAMMember) GetTerraformResourceType() string { - return "yandex_resourcemanager_folder_iam_member" + return "yandex_resourcemanager_folder_iam_member" } // GetConnectionDetailsMapping for this FolderIAMMember func (tr *FolderIAMMember) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this FolderIAMMember func (tr *FolderIAMMember) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this FolderIAMMember func (tr *FolderIAMMember) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this FolderIAMMember func (tr *FolderIAMMember) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this FolderIAMMember func (tr *FolderIAMMember) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this FolderIAMMember func (tr *FolderIAMMember) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this FolderIAMMember func (tr *FolderIAMMember) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this FolderIAMMember func (tr *FolderIAMMember) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this FolderIAMMember using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *FolderIAMMember) LateInitialize(attrs []byte) (bool, error) { - params := &FolderIAMMemberParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &FolderIAMMemberParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *FolderIAMMember) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_folderiammember_types.go b/apis/resourcemanager/v1alpha1/zz_folderiammember_types.go index 38f7de0..f3da6c6 100755 --- a/apis/resourcemanager/v1alpha1/zz_folderiammember_types.go +++ b/apis/resourcemanager/v1alpha1/zz_folderiammember_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,87 +7,78 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type FolderIAMMemberInitParameters struct { + // ID of the folder to attach a policy to. + // +crossplane:generate:reference:type=Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to attach a policy to.
-// +crossplane:generate:reference:type=Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` -// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: -Member *string `json:"member,omitempty" tf:"member,omitempty"` + // The role that should be assigned. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be assigned. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type FolderIAMMemberObservation struct { + // ID of the folder to attach a policy to. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to attach a policy to. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: -Member *string `json:"member,omitempty" tf:"member,omitempty"` + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + Member *string `json:"member,omitempty" tf:"member,omitempty"` -// The role that should be assigned. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type FolderIAMMemberParameters struct { + // ID of the folder to attach a policy to. + // +crossplane:generate:reference:type=Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to attach a policy to. -// +crossplane:generate:reference:type=Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder to populate folderId. 
-// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: -// +kubebuilder:validation:Optional -Member *string `json:"member,omitempty" tf:"member,omitempty"` + // The identity that will be granted the privilege that is specified in the role field. This field can have one of the following values: + // +kubebuilder:validation:Optional + Member *string `json:"member,omitempty" tf:"member,omitempty"` -// The role that should be assigned. -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be assigned. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // FolderIAMMemberSpec defines the desired state of FolderIAMMember type FolderIAMMemberSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider FolderIAMMemberParameters `json:"forProvider"` + ForProvider FolderIAMMemberParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -100,20 +89,19 @@ type FolderIAMMemberSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider FolderIAMMemberInitParameters `json:"initProvider,omitempty"` + InitProvider FolderIAMMemberInitParameters `json:"initProvider,omitempty"` } // FolderIAMMemberStatus defines the observed state of FolderIAMMember. type FolderIAMMemberStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider FolderIAMMemberObservation `json:"atProvider,omitempty"` + AtProvider FolderIAMMemberObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // FolderIAMMember is the Schema for the FolderIAMMembers API. Allows management of a single member for a single IAM binding for a Yandex Resource Manager folder. 
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -123,10 +111,10 @@ type FolderIAMMemberStatus struct { type FolderIAMMember struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec FolderIAMMemberSpec `json:"spec"` - Status FolderIAMMemberStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec FolderIAMMemberSpec `json:"spec"` + Status FolderIAMMemberStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/resourcemanager/v1alpha1/zz_folderiampolicy_terraformed.go b/apis/resourcemanager/v1alpha1/zz_folderiampolicy_terraformed.go index ec2da40..b39cbcc 100755 --- a/apis/resourcemanager/v1alpha1/zz_folderiampolicy_terraformed.go +++ b/apis/resourcemanager/v1alpha1/zz_folderiampolicy_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this FolderIAMPolicy func (mg *FolderIAMPolicy) GetTerraformResourceType() string { - return "yandex_resourcemanager_folder_iam_policy" + return "yandex_resourcemanager_folder_iam_policy" } // GetConnectionDetailsMapping for this FolderIAMPolicy func (tr *FolderIAMPolicy) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this FolderIAMPolicy func (tr *FolderIAMPolicy) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this FolderIAMPolicy func (tr *FolderIAMPolicy) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this FolderIAMPolicy func (tr *FolderIAMPolicy) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this FolderIAMPolicy func (tr *FolderIAMPolicy) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this FolderIAMPolicy func (tr *FolderIAMPolicy) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this FolderIAMPolicy func (tr *FolderIAMPolicy) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetInitParameters of this FolderIAMPolicy func (tr *FolderIAMPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this FolderIAMPolicy using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *FolderIAMPolicy) LateInitialize(attrs []byte) (bool, error) { - params := &FolderIAMPolicyParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &FolderIAMPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *FolderIAMPolicy) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/resourcemanager/v1alpha1/zz_folderiampolicy_types.go b/apis/resourcemanager/v1alpha1/zz_folderiampolicy_types.go index 5fcb754..6b72e3f 100755 --- a/apis/resourcemanager/v1alpha1/zz_folderiampolicy_types.go +++ b/apis/resourcemanager/v1alpha1/zz_folderiampolicy_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,70 +7,61 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type FolderIAMPolicyInitParameters struct { + // ID of the folder that the policy is attached to. + // +crossplane:generate:reference:type=Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder that the policy is attached to.
-// +crossplane:generate:reference:type=Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` - -// The yandex_iam_policy data source that represents the IAM policy that will be applied to the folder. This policy overrides any existing policy applied to the folder. -PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + // The yandex_iam_policy data source that represents the IAM policy that will be applied to the folder. This policy overrides any existing policy applied to the folder. + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` } - type FolderIAMPolicyObservation struct { + // ID of the folder that the policy is attached to. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder that the policy is attached to. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// The yandex_iam_policy data source that represents the IAM policy that will be applied to the folder. This policy overrides any existing policy applied to the folder. -PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + // The yandex_iam_policy data source that represents the IAM policy that will be applied to the folder. This policy overrides any existing policy applied to the folder. + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` } - type FolderIAMPolicyParameters struct { + // ID of the folder that the policy is attached to. + // +crossplane:generate:reference:type=Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder that the policy is attached to. -// +crossplane:generate:reference:type=Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` - -// Reference to a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// The yandex_iam_policy data source that represents the IAM policy that will be applied to the folder. This policy overrides any existing policy applied to the folder. 
-// +kubebuilder:validation:Optional -PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + // The yandex_iam_policy data source that represents the IAM policy that will be applied to the folder. This policy overrides any existing policy applied to the folder. + // +kubebuilder:validation:Optional + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` } // FolderIAMPolicySpec defines the desired state of FolderIAMPolicy type FolderIAMPolicySpec struct { v1.ResourceSpec `json:",inline"` - ForProvider FolderIAMPolicyParameters `json:"forProvider"` + ForProvider FolderIAMPolicyParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -83,20 +72,19 @@ type FolderIAMPolicySpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider FolderIAMPolicyInitParameters `json:"initProvider,omitempty"` + InitProvider FolderIAMPolicyInitParameters `json:"initProvider,omitempty"` } // FolderIAMPolicyStatus defines the observed state of FolderIAMPolicy. type FolderIAMPolicyStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider FolderIAMPolicyObservation `json:"atProvider,omitempty"` + AtProvider FolderIAMPolicyObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // FolderIAMPolicy is the Schema for the FolderIAMPolicys API. Allows management of the IAM policy for a Yandex Resource Manager folder. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -106,9 +94,9 @@ type FolderIAMPolicyStatus struct { type FolderIAMPolicy struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyData) || (has(self.initProvider) && has(self.initProvider.policyData))",message="spec.forProvider.policyData is a required parameter" - Spec FolderIAMPolicySpec `json:"spec"` - Status FolderIAMPolicyStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyData) || (has(self.initProvider) && has(self.initProvider.policyData))",message="spec.forProvider.policyData is a required parameter" + Spec FolderIAMPolicySpec `json:"spec"` + Status FolderIAMPolicyStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/resourcemanager/v1alpha1/zz_generated.conversion_hubs.go b/apis/resourcemanager/v1alpha1/zz_generated.conversion_hubs.go index ed2d9b8..df947aa 100755 --- a/apis/resourcemanager/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/resourcemanager/v1alpha1/zz_generated.conversion_hubs.go @@ -1,28 +1,24 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 +// Hub marks this type as a conversion hub. +func (tr *Cloud) Hub() {} - // Hub marks this type as a conversion hub. 
- func (tr *Cloud) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *CloudIAMBinding) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *CloudIAMMember) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *CloudIAMBinding) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *Folder) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *CloudIAMMember) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *FolderIAMBinding) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *Folder) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *FolderIAMMember) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *FolderIAMBinding) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *FolderIAMPolicy) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *FolderIAMMember) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *FolderIAMPolicy) Hub() {} diff --git a/apis/resourcemanager/v1alpha1/zz_generated.deepcopy.go b/apis/resourcemanager/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a4a3f71 --- /dev/null +++ b/apis/resourcemanager/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1640 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cloud) DeepCopyInto(out *Cloud) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cloud. +func (in *Cloud) DeepCopy() *Cloud { + if in == nil { + return nil + } + out := new(Cloud) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cloud) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMBinding) DeepCopyInto(out *CloudIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBinding. +func (in *CloudIAMBinding) DeepCopy() *CloudIAMBinding { + if in == nil { + return nil + } + out := new(CloudIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudIAMBindingInitParameters) DeepCopyInto(out *CloudIAMBindingInitParameters) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CloudIDRef != nil { + in, out := &in.CloudIDRef, &out.CloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudIDSelector != nil { + in, out := &in.CloudIDSelector, &out.CloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBindingInitParameters. +func (in *CloudIAMBindingInitParameters) DeepCopy() *CloudIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(CloudIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMBindingList) DeepCopyInto(out *CloudIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBindingList. +func (in *CloudIAMBindingList) DeepCopy() *CloudIAMBindingList { + if in == nil { + return nil + } + out := new(CloudIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMBindingObservation) DeepCopyInto(out *CloudIAMBindingObservation) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBindingObservation. +func (in *CloudIAMBindingObservation) DeepCopy() *CloudIAMBindingObservation { + if in == nil { + return nil + } + out := new(CloudIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudIAMBindingParameters) DeepCopyInto(out *CloudIAMBindingParameters) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CloudIDRef != nil { + in, out := &in.CloudIDRef, &out.CloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudIDSelector != nil { + in, out := &in.CloudIDSelector, &out.CloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBindingParameters. +func (in *CloudIAMBindingParameters) DeepCopy() *CloudIAMBindingParameters { + if in == nil { + return nil + } + out := new(CloudIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMBindingSpec) DeepCopyInto(out *CloudIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBindingSpec. +func (in *CloudIAMBindingSpec) DeepCopy() *CloudIAMBindingSpec { + if in == nil { + return nil + } + out := new(CloudIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMBindingStatus) DeepCopyInto(out *CloudIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMBindingStatus. +func (in *CloudIAMBindingStatus) DeepCopy() *CloudIAMBindingStatus { + if in == nil { + return nil + } + out := new(CloudIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMMember) DeepCopyInto(out *CloudIAMMember) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMember. +func (in *CloudIAMMember) DeepCopy() *CloudIAMMember { + if in == nil { + return nil + } + out := new(CloudIAMMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudIAMMember) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudIAMMemberInitParameters) DeepCopyInto(out *CloudIAMMemberInitParameters) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CloudIDRef != nil { + in, out := &in.CloudIDRef, &out.CloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudIDSelector != nil { + in, out := &in.CloudIDSelector, &out.CloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMemberInitParameters. +func (in *CloudIAMMemberInitParameters) DeepCopy() *CloudIAMMemberInitParameters { + if in == nil { + return nil + } + out := new(CloudIAMMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMMemberList) DeepCopyInto(out *CloudIAMMemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudIAMMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMemberList. +func (in *CloudIAMMemberList) DeepCopy() *CloudIAMMemberList { + if in == nil { + return nil + } + out := new(CloudIAMMemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudIAMMemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMMemberObservation) DeepCopyInto(out *CloudIAMMemberObservation) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMemberObservation. +func (in *CloudIAMMemberObservation) DeepCopy() *CloudIAMMemberObservation { + if in == nil { + return nil + } + out := new(CloudIAMMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudIAMMemberParameters) DeepCopyInto(out *CloudIAMMemberParameters) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CloudIDRef != nil { + in, out := &in.CloudIDRef, &out.CloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudIDSelector != nil { + in, out := &in.CloudIDSelector, &out.CloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMemberParameters. +func (in *CloudIAMMemberParameters) DeepCopy() *CloudIAMMemberParameters { + if in == nil { + return nil + } + out := new(CloudIAMMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMMemberSpec) DeepCopyInto(out *CloudIAMMemberSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMemberSpec. +func (in *CloudIAMMemberSpec) DeepCopy() *CloudIAMMemberSpec { + if in == nil { + return nil + } + out := new(CloudIAMMemberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudIAMMemberStatus) DeepCopyInto(out *CloudIAMMemberStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIAMMemberStatus. +func (in *CloudIAMMemberStatus) DeepCopy() *CloudIAMMemberStatus { + if in == nil { + return nil + } + out := new(CloudIAMMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudInitParameters) DeepCopyInto(out *CloudInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitParameters. 
+func (in *CloudInitParameters) DeepCopy() *CloudInitParameters { + if in == nil { + return nil + } + out := new(CloudInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudList) DeepCopyInto(out *CloudList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cloud, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudList. +func (in *CloudList) DeepCopy() *CloudList { + if in == nil { + return nil + } + out := new(CloudList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudObservation) DeepCopyInto(out *CloudObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudObservation. +func (in *CloudObservation) DeepCopy() *CloudObservation { + if in == nil { + return nil + } + out := new(CloudObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudParameters) DeepCopyInto(out *CloudParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrganizationID != nil { + in, out := &in.OrganizationID, &out.OrganizationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudParameters. 
+func (in *CloudParameters) DeepCopy() *CloudParameters { + if in == nil { + return nil + } + out := new(CloudParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudSpec) DeepCopyInto(out *CloudSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudSpec. +func (in *CloudSpec) DeepCopy() *CloudSpec { + if in == nil { + return nil + } + out := new(CloudSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudStatus) DeepCopyInto(out *CloudStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStatus. +func (in *CloudStatus) DeepCopy() *CloudStatus { + if in == nil { + return nil + } + out := new(CloudStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Folder) DeepCopyInto(out *Folder) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Folder. +func (in *Folder) DeepCopy() *Folder { + if in == nil { + return nil + } + out := new(Folder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Folder) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMBinding) DeepCopyInto(out *FolderIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBinding. +func (in *FolderIAMBinding) DeepCopy() *FolderIAMBinding { + if in == nil { + return nil + } + out := new(FolderIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderIAMBindingInitParameters) DeepCopyInto(out *FolderIAMBindingInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBindingInitParameters. +func (in *FolderIAMBindingInitParameters) DeepCopy() *FolderIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(FolderIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMBindingList) DeepCopyInto(out *FolderIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FolderIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBindingList. +func (in *FolderIAMBindingList) DeepCopy() *FolderIAMBindingList { + if in == nil { + return nil + } + out := new(FolderIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMBindingObservation) DeepCopyInto(out *FolderIAMBindingObservation) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBindingObservation. +func (in *FolderIAMBindingObservation) DeepCopy() *FolderIAMBindingObservation { + if in == nil { + return nil + } + out := new(FolderIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderIAMBindingParameters) DeepCopyInto(out *FolderIAMBindingParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBindingParameters. +func (in *FolderIAMBindingParameters) DeepCopy() *FolderIAMBindingParameters { + if in == nil { + return nil + } + out := new(FolderIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMBindingSpec) DeepCopyInto(out *FolderIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBindingSpec. +func (in *FolderIAMBindingSpec) DeepCopy() *FolderIAMBindingSpec { + if in == nil { + return nil + } + out := new(FolderIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMBindingStatus) DeepCopyInto(out *FolderIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMBindingStatus. +func (in *FolderIAMBindingStatus) DeepCopy() *FolderIAMBindingStatus { + if in == nil { + return nil + } + out := new(FolderIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMMember) DeepCopyInto(out *FolderIAMMember) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMember. +func (in *FolderIAMMember) DeepCopy() *FolderIAMMember { + if in == nil { + return nil + } + out := new(FolderIAMMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderIAMMember) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderIAMMemberInitParameters) DeepCopyInto(out *FolderIAMMemberInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMemberInitParameters. +func (in *FolderIAMMemberInitParameters) DeepCopy() *FolderIAMMemberInitParameters { + if in == nil { + return nil + } + out := new(FolderIAMMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMMemberList) DeepCopyInto(out *FolderIAMMemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FolderIAMMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMemberList. +func (in *FolderIAMMemberList) DeepCopy() *FolderIAMMemberList { + if in == nil { + return nil + } + out := new(FolderIAMMemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderIAMMemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMMemberObservation) DeepCopyInto(out *FolderIAMMemberObservation) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMemberObservation. +func (in *FolderIAMMemberObservation) DeepCopy() *FolderIAMMemberObservation { + if in == nil { + return nil + } + out := new(FolderIAMMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderIAMMemberParameters) DeepCopyInto(out *FolderIAMMemberParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMemberParameters. +func (in *FolderIAMMemberParameters) DeepCopy() *FolderIAMMemberParameters { + if in == nil { + return nil + } + out := new(FolderIAMMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMMemberSpec) DeepCopyInto(out *FolderIAMMemberSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMemberSpec. +func (in *FolderIAMMemberSpec) DeepCopy() *FolderIAMMemberSpec { + if in == nil { + return nil + } + out := new(FolderIAMMemberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMMemberStatus) DeepCopyInto(out *FolderIAMMemberStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMMemberStatus. +func (in *FolderIAMMemberStatus) DeepCopy() *FolderIAMMemberStatus { + if in == nil { + return nil + } + out := new(FolderIAMMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMPolicy) DeepCopyInto(out *FolderIAMPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicy. +func (in *FolderIAMPolicy) DeepCopy() *FolderIAMPolicy { + if in == nil { + return nil + } + out := new(FolderIAMPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderIAMPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderIAMPolicyInitParameters) DeepCopyInto(out *FolderIAMPolicyInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicyInitParameters. +func (in *FolderIAMPolicyInitParameters) DeepCopy() *FolderIAMPolicyInitParameters { + if in == nil { + return nil + } + out := new(FolderIAMPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMPolicyList) DeepCopyInto(out *FolderIAMPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FolderIAMPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicyList. +func (in *FolderIAMPolicyList) DeepCopy() *FolderIAMPolicyList { + if in == nil { + return nil + } + out := new(FolderIAMPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderIAMPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMPolicyObservation) DeepCopyInto(out *FolderIAMPolicyObservation) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicyObservation. +func (in *FolderIAMPolicyObservation) DeepCopy() *FolderIAMPolicyObservation { + if in == nil { + return nil + } + out := new(FolderIAMPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderIAMPolicyParameters) DeepCopyInto(out *FolderIAMPolicyParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicyParameters. +func (in *FolderIAMPolicyParameters) DeepCopy() *FolderIAMPolicyParameters { + if in == nil { + return nil + } + out := new(FolderIAMPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMPolicySpec) DeepCopyInto(out *FolderIAMPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicySpec. +func (in *FolderIAMPolicySpec) DeepCopy() *FolderIAMPolicySpec { + if in == nil { + return nil + } + out := new(FolderIAMPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderIAMPolicyStatus) DeepCopyInto(out *FolderIAMPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderIAMPolicyStatus. +func (in *FolderIAMPolicyStatus) DeepCopy() *FolderIAMPolicyStatus { + if in == nil { + return nil + } + out := new(FolderIAMPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderInitParameters) DeepCopyInto(out *FolderInitParameters) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CloudIDRef != nil { + in, out := &in.CloudIDRef, &out.CloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudIDSelector != nil { + in, out := &in.CloudIDSelector, &out.CloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderInitParameters. 
+func (in *FolderInitParameters) DeepCopy() *FolderInitParameters { + if in == nil { + return nil + } + out := new(FolderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderList) DeepCopyInto(out *FolderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Folder, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderList. +func (in *FolderList) DeepCopy() *FolderList { + if in == nil { + return nil + } + out := new(FolderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FolderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderObservation) DeepCopyInto(out *FolderObservation) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderObservation. +func (in *FolderObservation) DeepCopy() *FolderObservation { + if in == nil { + return nil + } + out := new(FolderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FolderParameters) DeepCopyInto(out *FolderParameters) { + *out = *in + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.CloudIDRef != nil { + in, out := &in.CloudIDRef, &out.CloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudIDSelector != nil { + in, out := &in.CloudIDSelector, &out.CloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderParameters. +func (in *FolderParameters) DeepCopy() *FolderParameters { + if in == nil { + return nil + } + out := new(FolderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderSpec) DeepCopyInto(out *FolderSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderSpec. +func (in *FolderSpec) DeepCopy() *FolderSpec { + if in == nil { + return nil + } + out := new(FolderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FolderStatus) DeepCopyInto(out *FolderStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FolderStatus. +func (in *FolderStatus) DeepCopy() *FolderStatus { + if in == nil { + return nil + } + out := new(FolderStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/resourcemanager/v1alpha1/zz_generated.resolvers.go b/apis/resourcemanager/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..5a2bf38 --- /dev/null +++ b/apis/resourcemanager/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,262 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this CloudIAMBinding. 
+func (mg *CloudIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CloudIDRef, + Selector: mg.Spec.ForProvider.CloudIDSelector, + To: reference.To{ + List: &CloudList{}, + Managed: &Cloud{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CloudID") + } + mg.Spec.ForProvider.CloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CloudIDRef, + Selector: mg.Spec.InitProvider.CloudIDSelector, + To: reference.To{ + List: &CloudList{}, + Managed: &Cloud{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudID") + } + mg.Spec.InitProvider.CloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CloudIAMMember. +func (mg *CloudIAMMember) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CloudIDRef, + Selector: mg.Spec.ForProvider.CloudIDSelector, + To: reference.To{ + List: &CloudList{}, + Managed: &Cloud{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CloudID") + } + mg.Spec.ForProvider.CloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CloudIDRef, + Selector: mg.Spec.InitProvider.CloudIDSelector, + To: reference.To{ + List: &CloudList{}, + Managed: &Cloud{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudID") + } + mg.Spec.InitProvider.CloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Folder. 
+func (mg *Folder) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CloudIDRef, + Selector: mg.Spec.ForProvider.CloudIDSelector, + To: reference.To{ + List: &CloudList{}, + Managed: &Cloud{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CloudID") + } + mg.Spec.ForProvider.CloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CloudIDRef, + Selector: mg.Spec.InitProvider.CloudIDSelector, + To: reference.To{ + List: &CloudList{}, + Managed: &Cloud{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudID") + } + mg.Spec.InitProvider.CloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FolderIAMBinding. +func (mg *FolderIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &FolderList{}, + Managed: &Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &FolderList{}, + Managed: &Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FolderIAMMember. 
+func (mg *FolderIAMMember) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &FolderList{}, + Managed: &Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &FolderList{}, + Managed: &Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FolderIAMPolicy. +func (mg *FolderIAMPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &FolderList{}, + Managed: &Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &FolderList{}, + Managed: &Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/resourcemanager/v1alpha1/zz_groupversion_info.go b/apis/resourcemanager/v1alpha1/zz_groupversion_info.go index 1fc22a6..f2bf6f4 100755 --- a/apis/resourcemanager/v1alpha1/zz_groupversion_info.go +++ b/apis/resourcemanager/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/serverless/v1alpha1/zz_container_terraformed.go b/apis/serverless/v1alpha1/zz_container_terraformed.go index 98a3bda..8f3000f 100755 --- a/apis/serverless/v1alpha1/zz_container_terraformed.go +++ b/apis/serverless/v1alpha1/zz_container_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Container func (mg *Container) GetTerraformResourceType() string { - return "yandex_serverless_container" + return "yandex_serverless_container" } // GetConnectionDetailsMapping for this Container func (tr *Container) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Container func (tr *Container) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Container func (tr *Container) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Container func (tr *Container) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Container func (tr *Container) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Container func (tr *Container) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Container func (tr *Container) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this Container func (tr *Container) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider.
As it also sets - overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Container using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Container) LateInitialize(attrs []byte) (bool, error) { - params := &ContainerParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &ContainerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Container) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/serverless/v1alpha1/zz_container_types.go b/apis/serverless/v1alpha1/zz_container_types.go index 01e1d95..f4e1d8c 100755 --- a/apis/serverless/v1alpha1/zz_container_types.go +++ b/apis/serverless/v1alpha1/zz_container_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT.
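// Editor's sketch (not generated code; assumes the mergo import used above): the merge performed
// by GetMergedParameters, with illustrative values. mergo.WithSliceDeepCopy switches overwriting
// on as a side effect, so the extra option resets c.Overwrite to false: values already present in
// forProvider win, and initProvider only fills the gaps.
func exampleMergeSemantics() (map[string]any, error) { // hypothetical helper
	forProvider := map[string]any{"memory": 256.0}
	initProvider := map[string]any{"memory": 128.0, "concurrency": 4.0}
	err := mergo.Merge(&forProvider, initProvider, mergo.WithSliceDeepCopy,
		func(c *mergo.Config) { c.Overwrite = false })
	// forProvider is now {"memory": 256, "concurrency": 4}: memory kept from
	// forProvider, concurrency filled in from initProvider.
	return forProvider, err
}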
package v1alpha1 @@ -9,731 +7,668 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ConnectivityInitParameters struct { - -// Network the revision will have access to -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Network the revision will have access to + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` } - type ConnectivityObservation struct { - -// Network the revision will have access to -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Network the revision will have access to + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` } - type ConnectivityParameters struct { - -// Network the revision will have access to -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId" tf:"network_id,omitempty"` + // Network the revision will have access to + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId" tf:"network_id,omitempty"` } - type ContainerInitParameters struct { + // Concurrency of Yandex Cloud Serverless Container + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` -// Concurrency of Yandex Cloud Serverless Container -Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` - -// Network access. If specified the revision will be attached to specified network -Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Network access. If specified the revision will be attached to specified network + Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Core fraction (0...100) of the Yandex Cloud Serverless Container -CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + // Core fraction (0...100) of the Yandex Cloud Serverless Container + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` -Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` -// Description of the Yandex Cloud Serverless Container -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud Serverless Container + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud Serverless Container -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud Serverless Container + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Revision deployment image for Yandex Cloud Serverless Container -Image []ImageInitParameters `json:"image,omitempty" tf:"image,omitempty"` + // Revision deployment image for Yandex Cloud Serverless Container + Image []ImageInitParameters `json:"image,omitempty" tf:"image,omitempty"` -// A set of key/value label pairs to assign to the Yandex Cloud Serverless Container -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud Serverless Container + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging from Yandex Cloud Serverless Container -LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud Serverless Container + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container -// Container memory in megabytes, should be aligned to 128 -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + // Container memory in megabytes, should be aligned to 128 + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` -// Mounts for Yandex Cloud Serverless Container -Mounts []MountsInitParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` + // Mounts for Yandex Cloud Serverless Container + Mounts []MountsInitParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` -// Yandex Cloud Serverless Container name -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud Serverless Container name + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Provision policy. If specified the revision will have prepared instances -ProvisionPolicy []ProvisionPolicyInitParameters `json:"provisionPolicy,omitempty" tf:"provision_policy,omitempty"` + // Provision policy. 
If specified the revision will have prepared instances + ProvisionPolicy []ProvisionPolicyInitParameters `json:"provisionPolicy,omitempty" tf:"provision_policy,omitempty"` -// Secrets for Yandex Cloud Serverless Container -Secrets []SecretsInitParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + // Secrets for Yandex Cloud Serverless Container + Secrets []SecretsInitParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` -// Service account ID for Yandex Cloud Serverless Container -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account ID for Yandex Cloud Serverless Container + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` -// (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Serverless Container -StorageMounts []StorageMountsInitParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + // (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Serverless Container + StorageMounts []StorageMountsInitParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` } - type ContainerObservation struct { + // Concurrency of Yandex Cloud Serverless Container + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` -// Concurrency of Yandex Cloud Serverless Container -Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` - -// Network access. If specified the revision will be attached to specified network -Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Network access. 
If specified the revision will be attached to specified network + Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Core fraction (0...100) of the Yandex Cloud Serverless Container -CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + // Core fraction (0...100) of the Yandex Cloud Serverless Container + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` -Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` -// Creation timestamp of the Yandex Cloud Serverless Container -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // Creation timestamp of the Yandex Cloud Serverless Container + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Description of the Yandex Cloud Serverless Container -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud Serverless Container + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud Serverless Container -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud Serverless Container + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Secret's id -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Secret's id + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Revision deployment image for Yandex Cloud Serverless Container -Image []ImageObservation `json:"image,omitempty" tf:"image,omitempty"` + // Revision deployment image for Yandex Cloud Serverless Container + Image []ImageObservation `json:"image,omitempty" tf:"image,omitempty"` -// A set of key/value label pairs to assign to the Yandex Cloud Serverless Container -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud Serverless Container + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging from Yandex Cloud Serverless Container -LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud Serverless Container + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container -// Container memory in megabytes, should be aligned to 128 -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + // Container memory in megabytes, should be aligned to 128 + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` -// Mounts for Yandex Cloud Serverless Container -Mounts []MountsObservation `json:"mounts,omitempty" tf:"mounts,omitempty"` + // Mounts for Yandex Cloud Serverless Container + Mounts []MountsObservation `json:"mounts,omitempty" tf:"mounts,omitempty"` -// 
Yandex Cloud Serverless Container name -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud Serverless Container name + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Provision policy. If specified the revision will have prepared instances -ProvisionPolicy []ProvisionPolicyObservation `json:"provisionPolicy,omitempty" tf:"provision_policy,omitempty"` + // Provision policy. If specified the revision will have prepared instances + ProvisionPolicy []ProvisionPolicyObservation `json:"provisionPolicy,omitempty" tf:"provision_policy,omitempty"` -// Last revision ID of the Yandex Cloud Serverless Container -RevisionID *string `json:"revisionId,omitempty" tf:"revision_id,omitempty"` + // Last revision ID of the Yandex Cloud Serverless Container + RevisionID *string `json:"revisionId,omitempty" tf:"revision_id,omitempty"` -// Secrets for Yandex Cloud Serverless Container -Secrets []SecretsObservation `json:"secrets,omitempty" tf:"secrets,omitempty"` + // Secrets for Yandex Cloud Serverless Container + Secrets []SecretsObservation `json:"secrets,omitempty" tf:"secrets,omitempty"` -// Service account ID for Yandex Cloud Serverless Container -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account ID for Yandex Cloud Serverless Container + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Serverless Container -StorageMounts []StorageMountsObservation `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + // (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Serverless Container + StorageMounts []StorageMountsObservation `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` -// Invoke URL for the Yandex Cloud Serverless Container -URL *string `json:"url,omitempty" tf:"url,omitempty"` + // Invoke URL for the Yandex Cloud Serverless Container + URL *string `json:"url,omitempty" tf:"url,omitempty"` } - type ContainerParameters struct { + // Concurrency of Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` -// Concurrency of Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` - -// Network access. If specified the revision will be attached to specified network -// +kubebuilder:validation:Optional -Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Network access. 
If specified the revision will be attached to specified network + // +kubebuilder:validation:Optional + Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Core fraction (0...100) of the Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + // Core fraction (0...100) of the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` -// +kubebuilder:validation:Optional -Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + // +kubebuilder:validation:Optional + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` -// Description of the Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud Serverless Container -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud Serverless Container + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Revision deployment image for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Image []ImageParameters `json:"image,omitempty" tf:"image,omitempty"` + // Revision deployment image for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Image []ImageParameters `json:"image,omitempty" tf:"image,omitempty"` -// A set of key/value label pairs to assign to the Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging from Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container -// Container memory in megabytes, should be aligned to 128 -// +kubebuilder:validation:Optional -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + // Container memory in megabytes, should be aligned to 128 + // +kubebuilder:validation:Optional + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` -// Mounts for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Mounts []MountsParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` + // Mounts for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Mounts []MountsParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` -// Yandex Cloud Serverless Container name -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud Serverless Container name + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Provision policy. If specified the revision will have prepared instances -// +kubebuilder:validation:Optional -ProvisionPolicy []ProvisionPolicyParameters `json:"provisionPolicy,omitempty" tf:"provision_policy,omitempty"` + // Provision policy. 
If specified the revision will have prepared instances + // +kubebuilder:validation:Optional + ProvisionPolicy []ProvisionPolicyParameters `json:"provisionPolicy,omitempty" tf:"provision_policy,omitempty"` -// Secrets for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Secrets []SecretsParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + // Secrets for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Secrets []SecretsParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` -// Service account ID for Yandex Cloud Serverless Container -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account ID for Yandex Cloud Serverless Container + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Reference to a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` -// Selector for a ServiceAccount in iam to populate serviceAccountId. -// +kubebuilder:validation:Optional -ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + // Selector for a ServiceAccount in iam to populate serviceAccountId. 
+ // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` -// (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -StorageMounts []StorageMountsParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + // (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + StorageMounts []StorageMountsParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` } - type EphemeralDiskInitParameters struct { + // Optional block size of the ephemeral disk in KB + BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` -// Optional block size of the ephemeral disk in KB -BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` - -// Size of the ephemeral disk in GB -SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` + // Size of the ephemeral disk in GB + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` } - type EphemeralDiskObservation struct { + // Optional block size of the ephemeral disk in KB + BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` -// Optional block size of the ephemeral disk in KB -BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` - -// Size of the ephemeral disk in GB -SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` + // Size of the ephemeral disk in GB + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` } - type EphemeralDiskParameters struct { + // Optional block size of the ephemeral disk in KB + // +kubebuilder:validation:Optional + BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` -// Optional block size of the ephemeral disk in KB -// +kubebuilder:validation:Optional -BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` - -// Size of the ephemeral disk in GB -// +kubebuilder:validation:Optional -SizeGb *float64 `json:"sizeGb" tf:"size_gb,omitempty"` + // Size of the ephemeral disk in GB + // +kubebuilder:validation:Optional + SizeGb *float64 `json:"sizeGb" tf:"size_gb,omitempty"` } - type ImageInitParameters struct { + // List of arguments for Yandex Cloud Serverless Container + Args []*string `json:"args,omitempty" tf:"args,omitempty"` -// List of arguments for Yandex Cloud Serverless Container -Args []*string `json:"args,omitempty" tf:"args,omitempty"` - -// List of commands for Yandex Cloud Serverless Container -Command []*string `json:"command,omitempty" tf:"command,omitempty"` + // List of commands for Yandex Cloud Serverless Container + Command []*string `json:"command,omitempty" tf:"command,omitempty"` -// Digest of image that will be deployed as Yandex Cloud Serverless Container. If presented, should be equal to digest that will be resolved at server side by URL. Container will be updated on digest change even if image.0.url stays the same. If field not specified then its value will be computed. -Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` + // Digest of image that will be deployed as Yandex Cloud Serverless Container. If presented, should be equal to digest that will be resolved at server side by URL. Container will be updated on digest change even if image.0.url stays the same. If field not specified then its value will be computed. 
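+ // Illustrative format (editor's note, hypothetical value): "sha256:" followed by 64 hex characters, e.g. sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855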
+ Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` -// A set of key/value environment variable pairs for Yandex Cloud Serverless Container -// +mapType=granular -Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + // A set of key/value environment variable pairs for Yandex Cloud Serverless Container + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` -// Invoke URL for the Yandex Cloud Serverless Container -URL *string `json:"url,omitempty" tf:"url,omitempty"` + // Invoke URL for the Yandex Cloud Serverless Container + URL *string `json:"url,omitempty" tf:"url,omitempty"` -// Working directory for Yandex Cloud Serverless Container -WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` + // Working directory for Yandex Cloud Serverless Container + WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` } - type ImageObservation struct { + // List of arguments for Yandex Cloud Serverless Container + Args []*string `json:"args,omitempty" tf:"args,omitempty"` -// List of arguments for Yandex Cloud Serverless Container -Args []*string `json:"args,omitempty" tf:"args,omitempty"` - -// List of commands for Yandex Cloud Serverless Container -Command []*string `json:"command,omitempty" tf:"command,omitempty"` + // List of commands for Yandex Cloud Serverless Container + Command []*string `json:"command,omitempty" tf:"command,omitempty"` -// Digest of image that will be deployed as Yandex Cloud Serverless Container. If presented, should be equal to digest that will be resolved at server side by URL. Container will be updated on digest change even if image.0.url stays the same. If field not specified then its value will be computed. -Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` + // Digest of image that will be deployed as Yandex Cloud Serverless Container. If presented, should be equal to digest that will be resolved at server side by URL. Container will be updated on digest change even if image.0.url stays the same. If field not specified then its value will be computed. 
+ Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` -// A set of key/value environment variable pairs for Yandex Cloud Serverless Container -// +mapType=granular -Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + // A set of key/value environment variable pairs for Yandex Cloud Serverless Container + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` -// Invoke URL for the Yandex Cloud Serverless Container -URL *string `json:"url,omitempty" tf:"url,omitempty"` + // Invoke URL for the Yandex Cloud Serverless Container + URL *string `json:"url,omitempty" tf:"url,omitempty"` -// Working directory for Yandex Cloud Serverless Container -WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` + // Working directory for Yandex Cloud Serverless Container + WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` } - type ImageParameters struct { + // List of arguments for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` -// List of arguments for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Args []*string `json:"args,omitempty" tf:"args,omitempty"` - -// List of commands for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -Command []*string `json:"command,omitempty" tf:"command,omitempty"` + // List of commands for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Command []*string `json:"command,omitempty" tf:"command,omitempty"` -// Digest of image that will be deployed as Yandex Cloud Serverless Container. If presented, should be equal to digest that will be resolved at server side by URL. Container will be updated on digest change even if image.0.url stays the same. If field not specified then its value will be computed. -// +kubebuilder:validation:Optional -Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` + // Digest of image that will be deployed as Yandex Cloud Serverless Container. If presented, should be equal to digest that will be resolved at server side by URL. Container will be updated on digest change even if image.0.url stays the same. If field not specified then its value will be computed. 
+ // +kubebuilder:validation:Optional + Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` -// A set of key/value environment variable pairs for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -// +mapType=granular -Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + // A set of key/value environment variable pairs for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` -// Invoke URL for the Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -URL *string `json:"url" tf:"url,omitempty"` + // Invoke URL for the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` -// Working directory for Yandex Cloud Serverless Container -// +kubebuilder:validation:Optional -WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` + // Working directory for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` } - type LogOptionsInitParameters struct { + // Is logging from container disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from container disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Log entries are written to default log group for specified folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsObservation struct { + // Is logging from container disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from container disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Log entries are written to default log group for specified folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsParameters struct { + // Is logging from container disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from container disabled -// +kubebuilder:validation:Optional -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Log entries are 
written to default log group for specified folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to default log group for specified folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to specified log group -// +kubebuilder:validation:Optional -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Minimum log entry level -// +kubebuilder:validation:Optional -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type MountsInitParameters struct { + // One of the available mount types. Disk available during the function execution time + EphemeralDisk []EphemeralDiskInitParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` -// One of the available mount types. Disk available during the function execution time -EphemeralDisk []EphemeralDiskInitParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` + // Mount’s accessibility mode. Valid values are ro and rw + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Mount’s accessibility mode. Valid values are ro and rw -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Path inside the container to access the directory in which the bucket is mounted + MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` -// Path inside the container to access the directory in which the bucket is mounted -MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` - -// One of the available mount types. Object storage as a mount -ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + // One of the available mount types. Object storage as a mount + ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` } - type MountsObservation struct { + // One of the available mount types. Disk available during the function execution time + EphemeralDisk []EphemeralDiskObservation `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` -// One of the available mount types. Disk available during the function execution time -EphemeralDisk []EphemeralDiskObservation `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` + // Mount’s accessibility mode. Valid values are ro and rw + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Mount’s accessibility mode. Valid values are ro and rw -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Path inside the container to access the directory in which the bucket is mounted + MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` -// Path inside the container to access the directory in which the bucket is mounted -MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` - -// One of the available mount types. Object storage as a mount -ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + // One of the available mount types. 
Object storage as a mount + ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` } - type MountsParameters struct { + // One of the available mount types. Disk available during the function execution time + // +kubebuilder:validation:Optional + EphemeralDisk []EphemeralDiskParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` -// One of the available mount types. Disk available during the function execution time -// +kubebuilder:validation:Optional -EphemeralDisk []EphemeralDiskParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` + // Mount’s accessibility mode. Valid values are ro and rw + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Mount’s accessibility mode. Valid values are ro and rw -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Path inside the container to access the directory in which the bucket is mounted + // +kubebuilder:validation:Optional + MountPointPath *string `json:"mountPointPath" tf:"mount_point_path,omitempty"` -// Path inside the container to access the directory in which the bucket is mounted -// +kubebuilder:validation:Optional -MountPointPath *string `json:"mountPointPath" tf:"mount_point_path,omitempty"` - -// One of the available mount types. Object storage as a mount -// +kubebuilder:validation:Optional -ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + // One of the available mount types. Object storage as a mount + // +kubebuilder:validation:Optional + ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` } - type ObjectStorageInitParameters struct { + // Name of the mounting bucket + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` -// Reference to a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` -// Selector for a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` - -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. 
If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` } - type ObjectStorageObservation struct { + // Name of the mounting bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` } - type ObjectStorageParameters struct { + // Name of the mounting bucket + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) -// +kubebuilder:validation:Optional -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` -// Reference to a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` -// Selector for a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` - -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. 
If you leave this field empty, the entire bucket will be mounted + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` } - type ProvisionPolicyInitParameters struct { - -// Minimum number of prepared instances that are always ready to serve requests -MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` + // Minimum number of prepared instances that are always ready to serve requests + MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` } - type ProvisionPolicyObservation struct { - -// Minimum number of prepared instances that are always ready to serve requests -MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` + // Minimum number of prepared instances that are always ready to serve requests + MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` } - type ProvisionPolicyParameters struct { - -// Minimum number of prepared instances that are always ready to serve requests -// +kubebuilder:validation:Optional -MinInstances *float64 `json:"minInstances" tf:"min_instances,omitempty"` + // Minimum number of prepared instances that are always ready to serve requests + // +kubebuilder:validation:Optional + MinInstances *float64 `json:"minInstances" tf:"min_instances,omitempty"` } - type SecretsInitParameters struct { + // Container's environment variable in which secret's value will be stored + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` -// Container's environment variable in which secret's value will be stored -EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + // Secret's id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Secret's id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Reference to a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` -// Reference to a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + // Selector for a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` -// Selector for a Secret in lockbox to populate id. 
-// +kubebuilder:validation:Optional -IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + // Secret's entries key which value will be stored in environment variable + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// Secret's entries key which value will be stored in environment variable -Key *string `json:"key,omitempty" tf:"key,omitempty"` + // Secret's version id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` -// Secret's version id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + // Reference to a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` -// Reference to a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` - -// Selector for a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` + // Selector for a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` } - type SecretsObservation struct { + // Container's environment variable in which secret's value will be stored + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` -// Container's environment variable in which secret's value will be stored -EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + // Secret's id + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Secret's id -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Secret's entries key which value will be stored in environment variable + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// Secret's entries key which value will be stored in environment variable -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// Secret's version id -VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + // Secret's version id + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` } - type SecretsParameters struct { + // Container's environment variable in which secret's value will be stored + // +kubebuilder:validation:Optional + EnvironmentVariable *string `json:"environmentVariable" tf:"environment_variable,omitempty"` -// Container's environment variable in which secret's value will be stored -// +kubebuilder:validation:Optional -EnvironmentVariable *string `json:"environmentVariable" tf:"environment_variable,omitempty"` + // Secret's id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Secret's id -// 
+crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Reference to a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` -// Reference to a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + // Selector for a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` -// Selector for a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + // Secret's entries key which value will be stored in environment variable + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` -// Secret's entries key which value will be stored in environment variable -// +kubebuilder:validation:Optional -Key *string `json:"key" tf:"key,omitempty"` + // Secret's version id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` -// Secret's version id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + // Reference to a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` -// Reference to a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` - -// Selector for a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` + // Selector for a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` } - type StorageMountsInitParameters struct { + // Name of the mounting bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + // Path inside the container to access the directory in which the bucket is mounted + MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` -// Path inside the container to access the directory in which the bucket is mounted -MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Prefix within the bucket. 
If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -// Mount the bucket in read-only mode -ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + // Mount the bucket in read-only mode + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` } - type StorageMountsObservation struct { + // Name of the mounting bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + // Path inside the container to access the directory in which the bucket is mounted + MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` -// Path inside the container to access the directory in which the bucket is mounted -MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -// Mount the bucket in read-only mode -ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + // Mount the bucket in read-only mode + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` } - type StorageMountsParameters struct { + // Name of the mounting bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` -// Name of the mounting bucket -// +kubebuilder:validation:Optional -Bucket *string `json:"bucket" tf:"bucket,omitempty"` + // Path inside the container to access the directory in which the bucket is mounted + // +kubebuilder:validation:Optional + MountPointPath *string `json:"mountPointPath" tf:"mount_point_path,omitempty"` -// Path inside the container to access the directory in which the bucket is mounted -// +kubebuilder:validation:Optional -MountPointPath *string `json:"mountPointPath" tf:"mount_point_path,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -// Mount the bucket in read-only mode -// +kubebuilder:validation:Optional -ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + // Mount the bucket in read-only mode + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` } // ContainerSpec defines the desired state of Container type ContainerSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ContainerParameters `json:"forProvider"` + ForProvider ContainerParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -744,20 +679,19 @@ type ContainerSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. 
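	// (Editor's illustration, hypothetical scenario:) e.g. spec.initProvider.provisionPolicy.minInstances
	// can seed prepared instances at creation while an external controller adjusts the value later
	// without the provider reverting the change.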
- InitProvider ContainerInitParameters `json:"initProvider,omitempty"` + InitProvider ContainerInitParameters `json:"initProvider,omitempty"` } // ContainerStatus defines the observed state of Container. type ContainerStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ContainerObservation `json:"atProvider,omitempty"` + AtProvider ContainerObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Container is the Schema for the Containers API. Allows management of a Yandex Cloud Serverless Container. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -767,11 +701,11 @@ type ContainerStatus struct { type Container struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.image) || (has(self.initProvider) && has(self.initProvider.image))",message="spec.forProvider.image is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" - Spec ContainerSpec `json:"spec"` - Status ContainerStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.image) || (has(self.initProvider) && has(self.initProvider.image))",message="spec.forProvider.image is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ContainerSpec `json:"spec"` + Status ContainerStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go b/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go index 45b1cc6..0c9500d 100755 --- a/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go +++ b/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this ContainerIAMBinding func (mg *ContainerIAMBinding) GetTerraformResourceType() string { - return "yandex_serverless_container_iam_binding" + return "yandex_serverless_container_iam_binding" } // GetConnectionDetailsMapping for this ContainerIAMBinding func (tr *ContainerIAMBinding) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this ContainerIAMBinding func (tr *ContainerIAMBinding) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this ContainerIAMBinding func (tr *ContainerIAMBinding) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this ContainerIAMBinding func (tr *ContainerIAMBinding) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this ContainerIAMBinding func (tr *ContainerIAMBinding) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this ContainerIAMBinding func (tr *ContainerIAMBinding) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this ContainerIAMBinding func (tr *ContainerIAMBinding) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this ContainerIAMBinding func (tr *ContainerIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for
resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this ContainerIAMBinding using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *ContainerIAMBinding) LateInitialize(attrs []byte) (bool, error) { - params := &ContainerIAMBindingParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &ContainerIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *ContainerIAMBinding) GetTerraformSchemaVersion() int { - return 0 + return 0 } diff --git a/apis/serverless/v1alpha1/zz_containeriambinding_types.go b/apis/serverless/v1alpha1/zz_containeriambinding_types.go index 21d1d07..81896b3 100755 --- a/apis/serverless/v1alpha1/zz_containeriambinding_types.go +++ b/apis/serverless/v1alpha1/zz_containeriambinding_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,72 +7,63 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type ContainerIAMBindingInitParameters struct { + // The Yandex Serverless Container ID to apply a binding to. 
+ ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` -// The Yandex Serverless Container ID to apply a binding to. -ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // The role that should be applied. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// The role that should be applied. -Role *string `json:"role,omitempty" tf:"role,omitempty"` - -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type ContainerIAMBindingObservation struct { + // The Yandex Serverless Container ID to apply a binding to. + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` -// The Yandex Serverless Container ID to apply a binding to. -ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` - -ID *string `json:"id,omitempty" tf:"id,omitempty"` + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be applied. -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. + Role *string `json:"role,omitempty" tf:"role,omitempty"` -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } - type ContainerIAMBindingParameters struct { + // The Yandex Serverless Container ID to apply a binding to. + // +kubebuilder:validation:Optional + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` -// The Yandex Serverless Container ID to apply a binding to. -// +kubebuilder:validation:Optional -ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` - -// Identities that will be granted the privilege in role. Each entry can have one of the following values: -// +kubebuilder:validation:Optional -// +listType=set -Members []*string `json:"members,omitempty" tf:"members,omitempty"` + // Identities that will be granted the privilege in role. Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` -// The role that should be applied. -// +kubebuilder:validation:Optional -Role *string `json:"role,omitempty" tf:"role,omitempty"` + // The role that should be applied. 
+ // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` -// +kubebuilder:validation:Optional -SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` } // ContainerIAMBindingSpec defines the desired state of ContainerIAMBinding type ContainerIAMBindingSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider ContainerIAMBindingParameters `json:"forProvider"` + ForProvider ContainerIAMBindingParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -85,20 +74,19 @@ type ContainerIAMBindingSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider ContainerIAMBindingInitParameters `json:"initProvider,omitempty"` + InitProvider ContainerIAMBindingInitParameters `json:"initProvider,omitempty"` } // ContainerIAMBindingStatus defines the observed state of ContainerIAMBinding. type ContainerIAMBindingStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider ContainerIAMBindingObservation `json:"atProvider,omitempty"` + AtProvider ContainerIAMBindingObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // ContainerIAMBinding is the Schema for the ContainerIAMBindings API. Allows management of a single IAM binding for a Yandex Serverless Container. // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -108,11 +96,11 @@ type ContainerIAMBindingStatus struct { type ContainerIAMBinding struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.containerId) || (has(self.initProvider) && has(self.initProvider.containerId))",message="spec.forProvider.containerId is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" - Spec ContainerIAMBindingSpec `json:"spec"` - Status ContainerIAMBindingStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies
|| 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ContainerIAMBindingSpec `json:"spec"` + Status ContainerIAMBindingStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go b/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go index b2db5cf..e99fddc 100755 --- a/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go @@ -1,13 +1,9 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 +// Hub marks this type as a conversion hub. +func (tr *Container) Hub() {} - // Hub marks this type as a conversion hub. - func (tr *Container) Hub() {} - - // Hub marks this type as a conversion hub. - func (tr *ContainerIAMBinding) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *ContainerIAMBinding) Hub() {} diff --git a/apis/serverless/v1alpha1/zz_generated.deepcopy.go b/apis/serverless/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..f64775b --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1727 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInitParameters) DeepCopyInto(out *ConnectivityInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInitParameters. +func (in *ConnectivityInitParameters) DeepCopy() *ConnectivityInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityObservation) DeepCopyInto(out *ConnectivityObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityObservation. +func (in *ConnectivityObservation) DeepCopy() *ConnectivityObservation { + if in == nil { + return nil + } + out := new(ConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectivityParameters) DeepCopyInto(out *ConnectivityParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityParameters. +func (in *ConnectivityParameters) DeepCopy() *ConnectivityParameters { + if in == nil { + return nil + } + out := new(ConnectivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Container) DeepCopyInto(out *Container) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Container) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBinding) DeepCopyInto(out *ContainerIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBinding. +func (in *ContainerIAMBinding) DeepCopy() *ContainerIAMBinding { + if in == nil { + return nil + } + out := new(ContainerIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingInitParameters) DeepCopyInto(out *ContainerIAMBindingInitParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingInitParameters. +func (in *ContainerIAMBindingInitParameters) DeepCopy() *ContainerIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(ContainerIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerIAMBindingList) DeepCopyInto(out *ContainerIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContainerIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingList. +func (in *ContainerIAMBindingList) DeepCopy() *ContainerIAMBindingList { + if in == nil { + return nil + } + out := new(ContainerIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingObservation) DeepCopyInto(out *ContainerIAMBindingObservation) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingObservation. +func (in *ContainerIAMBindingObservation) DeepCopy() *ContainerIAMBindingObservation { + if in == nil { + return nil + } + out := new(ContainerIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingParameters) DeepCopyInto(out *ContainerIAMBindingParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingParameters. +func (in *ContainerIAMBindingParameters) DeepCopy() *ContainerIAMBindingParameters { + if in == nil { + return nil + } + out := new(ContainerIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerIAMBindingSpec) DeepCopyInto(out *ContainerIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingSpec. +func (in *ContainerIAMBindingSpec) DeepCopy() *ContainerIAMBindingSpec { + if in == nil { + return nil + } + out := new(ContainerIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingStatus) DeepCopyInto(out *ContainerIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingStatus. +func (in *ContainerIAMBindingStatus) DeepCopy() *ContainerIAMBindingStatus { + if in == nil { + return nil + } + out := new(ContainerIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = make([]ImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]MountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, 
out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProvisionPolicy != nil { + in, out := &in.ProvisionPolicy, &out.ProvisionPolicy + *out = make([]ProvisionPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerList) DeepCopyInto(out *ContainerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerList. +func (in *ContainerList) DeepCopy() *ContainerList { + if in == nil { + return nil + } + out := new(ContainerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = make([]ImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]MountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProvisionPolicy != nil { + in, out := &in.ProvisionPolicy, &out.ProvisionPolicy + *out = make([]ProvisionPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RevisionID != nil { + in, out := &in.RevisionID, &out.RevisionID + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. +func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = make([]ImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]MountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProvisionPolicy != nil { + in, out := &in.ProvisionPolicy, &out.ProvisionPolicy + *out = make([]ProvisionPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. 
+func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSpec. +func (in *ContainerSpec) DeepCopy() *ContainerSpec { + if in == nil { + return nil + } + out := new(ContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralDiskInitParameters) DeepCopyInto(out *EphemeralDiskInitParameters) { + *out = *in + if in.BlockSizeKb != nil { + in, out := &in.BlockSizeKb, &out.BlockSizeKb + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralDiskInitParameters. +func (in *EphemeralDiskInitParameters) DeepCopy() *EphemeralDiskInitParameters { + if in == nil { + return nil + } + out := new(EphemeralDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralDiskObservation) DeepCopyInto(out *EphemeralDiskObservation) { + *out = *in + if in.BlockSizeKb != nil { + in, out := &in.BlockSizeKb, &out.BlockSizeKb + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralDiskObservation. +func (in *EphemeralDiskObservation) DeepCopy() *EphemeralDiskObservation { + if in == nil { + return nil + } + out := new(EphemeralDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralDiskParameters) DeepCopyInto(out *EphemeralDiskParameters) { + *out = *in + if in.BlockSizeKb != nil { + in, out := &in.BlockSizeKb, &out.BlockSizeKb + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralDiskParameters. 
+func (in *EphemeralDiskParameters) DeepCopy() *EphemeralDiskParameters { + if in == nil { + return nil + } + out := new(EphemeralDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkDir != nil { + in, out := &in.WorkDir, &out.WorkDir + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInitParameters. +func (in *ImageInitParameters) DeepCopy() *ImageInitParameters { + if in == nil { + return nil + } + out := new(ImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageObservation) DeepCopyInto(out *ImageObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkDir != nil { + in, out := &in.WorkDir, &out.WorkDir + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageObservation. +func (in *ImageObservation) DeepCopy() *ImageObservation { + if in == nil { + return nil + } + out := new(ImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageParameters) DeepCopyInto(out *ImageParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkDir != nil { + in, out := &in.WorkDir, &out.WorkDir + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParameters. +func (in *ImageParameters) DeepCopy() *ImageParameters { + if in == nil { + return nil + } + out := new(ImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MountsInitParameters) DeepCopyInto(out *MountsInitParameters) { + *out = *in + if in.EphemeralDisk != nil { + in, out := &in.EphemeralDisk, &out.EphemeralDisk + *out = make([]EphemeralDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountsInitParameters. +func (in *MountsInitParameters) DeepCopy() *MountsInitParameters { + if in == nil { + return nil + } + out := new(MountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MountsObservation) DeepCopyInto(out *MountsObservation) { + *out = *in + if in.EphemeralDisk != nil { + in, out := &in.EphemeralDisk, &out.EphemeralDisk + *out = make([]EphemeralDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountsObservation. +func (in *MountsObservation) DeepCopy() *MountsObservation { + if in == nil { + return nil + } + out := new(MountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MountsParameters) DeepCopyInto(out *MountsParameters) { + *out = *in + if in.EphemeralDisk != nil { + in, out := &in.EphemeralDisk, &out.EphemeralDisk + *out = make([]EphemeralDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountsParameters. +func (in *MountsParameters) DeepCopy() *MountsParameters { + if in == nil { + return nil + } + out := new(MountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageInitParameters) DeepCopyInto(out *ObjectStorageInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageInitParameters. +func (in *ObjectStorageInitParameters) DeepCopy() *ObjectStorageInitParameters { + if in == nil { + return nil + } + out := new(ObjectStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageObservation) DeepCopyInto(out *ObjectStorageObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageObservation. +func (in *ObjectStorageObservation) DeepCopy() *ObjectStorageObservation { + if in == nil { + return nil + } + out := new(ObjectStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageParameters) DeepCopyInto(out *ObjectStorageParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageParameters. 
+func (in *ObjectStorageParameters) DeepCopy() *ObjectStorageParameters { + if in == nil { + return nil + } + out := new(ObjectStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionPolicyInitParameters) DeepCopyInto(out *ProvisionPolicyInitParameters) { + *out = *in + if in.MinInstances != nil { + in, out := &in.MinInstances, &out.MinInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionPolicyInitParameters. +func (in *ProvisionPolicyInitParameters) DeepCopy() *ProvisionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ProvisionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionPolicyObservation) DeepCopyInto(out *ProvisionPolicyObservation) { + *out = *in + if in.MinInstances != nil { + in, out := &in.MinInstances, &out.MinInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionPolicyObservation. +func (in *ProvisionPolicyObservation) DeepCopy() *ProvisionPolicyObservation { + if in == nil { + return nil + } + out := new(ProvisionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionPolicyParameters) DeepCopyInto(out *ProvisionPolicyParameters) { + *out = *in + if in.MinInstances != nil { + in, out := &in.MinInstances, &out.MinInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionPolicyParameters. +func (in *ProvisionPolicyParameters) DeepCopy() *ProvisionPolicyParameters { + if in == nil { + return nil + } + out := new(ProvisionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsInitParameters) DeepCopyInto(out *SecretsInitParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } + if in.VersionIDRef != nil { + in, out := &in.VersionIDRef, &out.VersionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionIDSelector != nil { + in, out := &in.VersionIDSelector, &out.VersionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsInitParameters. 
+func (in *SecretsInitParameters) DeepCopy() *SecretsInitParameters { + if in == nil { + return nil + } + out := new(SecretsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsObservation) DeepCopyInto(out *SecretsObservation) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsObservation. +func (in *SecretsObservation) DeepCopy() *SecretsObservation { + if in == nil { + return nil + } + out := new(SecretsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsParameters) DeepCopyInto(out *SecretsParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } + if in.VersionIDRef != nil { + in, out := &in.VersionIDRef, &out.VersionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionIDSelector != nil { + in, out := &in.VersionIDSelector, &out.VersionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsParameters. +func (in *SecretsParameters) DeepCopy() *SecretsParameters { + if in == nil { + return nil + } + out := new(SecretsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsInitParameters) DeepCopyInto(out *StorageMountsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsInitParameters. 
+func (in *StorageMountsInitParameters) DeepCopy() *StorageMountsInitParameters { + if in == nil { + return nil + } + out := new(StorageMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsObservation) DeepCopyInto(out *StorageMountsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsObservation. +func (in *StorageMountsObservation) DeepCopy() *StorageMountsObservation { + if in == nil { + return nil + } + out := new(StorageMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsParameters) DeepCopyInto(out *StorageMountsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsParameters. +func (in *StorageMountsParameters) DeepCopy() *StorageMountsParameters { + if in == nil { + return nil + } + out := new(StorageMountsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/serverless/v1alpha1/zz_generated.resolvers.go b/apis/serverless/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..0194a5c --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,201 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha13 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Container. 
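+//
+// Each *Ref/*Selector pair on the spec is resolved to a concrete ID before
+// reconciliation: a Reference names a managed resource directly, a Selector
+// matches one by labels, and the resolved value is written back into the
+// plain ID field (e.g. folderId). Illustrative manifest snippet (values are
+// hypothetical):
+//
+//	spec:
+//	  forProvider:
+//	    folderIdSelector:
+//	      matchLabels:
+//	        team: platform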
+func (mg *Container) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Mounts); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Mounts[i3].ObjectStorage); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].BucketRef, + Selector: mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].BucketSelector, + To: reference.To{ + List: &v1alpha11.BucketList{}, + Managed: &v1alpha11.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].Bucket") + } + mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].BucketRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Secrets[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Secrets[i3].IDRef, + Selector: mg.Spec.ForProvider.Secrets[i3].IDSelector, + To: reference.To{ + List: &v1alpha12.SecretList{}, + Managed: &v1alpha12.Secret{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Secrets[i3].ID") + } + mg.Spec.ForProvider.Secrets[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Secrets[i3].IDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Secrets[i3].VersionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Secrets[i3].VersionIDRef, + Selector: mg.Spec.ForProvider.Secrets[i3].VersionIDSelector, + To: reference.To{ + List: &v1alpha12.SecretVersionList{}, + Managed: &v1alpha12.SecretVersion{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Secrets[i3].VersionID") + } + mg.Spec.ForProvider.Secrets[i3].VersionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Secrets[i3].VersionIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha13.ServiceAccountList{}, + Managed: &v1alpha13.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + 
mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Mounts); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Mounts[i3].ObjectStorage); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].BucketRef, + Selector: mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].BucketSelector, + To: reference.To{ + List: &v1alpha11.BucketList{}, + Managed: &v1alpha11.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].Bucket") + } + mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].BucketRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Secrets[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Secrets[i3].IDRef, + Selector: mg.Spec.InitProvider.Secrets[i3].IDSelector, + To: reference.To{ + List: &v1alpha12.SecretList{}, + Managed: &v1alpha12.Secret{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Secrets[i3].ID") + } + mg.Spec.InitProvider.Secrets[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Secrets[i3].IDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Secrets[i3].VersionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Secrets[i3].VersionIDRef, + Selector: mg.Spec.InitProvider.Secrets[i3].VersionIDSelector, + To: reference.To{ + List: &v1alpha12.SecretVersionList{}, + Managed: &v1alpha12.SecretVersion{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Secrets[i3].VersionID") + } + mg.Spec.InitProvider.Secrets[i3].VersionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Secrets[i3].VersionIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha13.ServiceAccountList{}, + Managed: &v1alpha13.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + 
mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/serverless/v1alpha1/zz_groupversion_info.go b/apis/serverless/v1alpha1/zz_groupversion_info.go index 05cbe30..cff40fb 100755 --- a/apis/serverless/v1alpha1/zz_groupversion_info.go +++ b/apis/serverless/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. // +kubebuilder:object:generate=true diff --git a/apis/smartcaptcha/v1alpha1/zz_captcha_terraformed.go b/apis/smartcaptcha/v1alpha1/zz_captcha_terraformed.go index aa2cd9e..0a575ad 100755 --- a/apis/smartcaptcha/v1alpha1/zz_captcha_terraformed.go +++ b/apis/smartcaptcha/v1alpha1/zz_captcha_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Captcha func (mg *Captcha) GetTerraformResourceType() string { - return "yandex_smartcaptcha_captcha" + return "yandex_smartcaptcha_captcha" } // GetConnectionDetailsMapping for this Captcha func (tr *Captcha) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Captcha func (tr *Captcha) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Captcha func (tr *Captcha) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Captcha func (tr *Captcha) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Captcha func (tr *Captcha) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Captcha func (tr *Captcha) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Captcha func (tr *Captcha) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetMergedParameters of this Captcha
 func (tr *Captcha) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
-	params, err := tr.GetParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
-	}
-	if !shouldMergeInitProvider {
-		return params, nil
-	}
-
-	initParams, err := tr.GetInitParameters()
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
-	}
-
-	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
-	// slices from the initProvider to forProvider. As it also sets
-	// overwrite to true, we need to set it back to false, we don't
-	// want to overwrite the forProvider fields with the initProvider
-	// fields.
-	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
-		c.Overwrite = false
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
-	}
-
-	return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this Captcha using its observed tfState.
 // returns true if there are any spec changes for the resource.
 func (tr *Captcha) LateInitialize(attrs []byte) (bool, error) {
-	params := &CaptchaParameters{}
-	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
-		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
-	}
-	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
-	li := resource.NewGenericLateInitializer(opts...)
-	return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &CaptchaParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
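+	// The zero-value filter registered above keeps late-initialization from
+	// copying Terraform's zero values ("", 0, false) into spec.forProvider
+	// fields tagged omitempty, so only values the provider actually set are
+	// filled in.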
+ return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Captcha) GetTerraformSchemaVersion() int { - return 1 + return 1 } diff --git a/apis/smartcaptcha/v1alpha1/zz_captcha_types.go b/apis/smartcaptcha/v1alpha1/zz_captcha_types.go index cf4f42d..efb5520 100755 --- a/apis/smartcaptcha/v1alpha1/zz_captcha_types.go +++ b/apis/smartcaptcha/v1alpha1/zz_captcha_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 @@ -9,950 +7,845 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type CaptchaInitParameters struct { + // (List of String) + AllowedSites []*string `json:"allowedSites,omitempty" tf:"allowed_sites,omitempty"` -// (List of String) -AllowedSites []*string `json:"allowedSites,omitempty" tf:"allowed_sites,omitempty"` - -// (String) -ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` + // (String) + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` -// (String) -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // (String) + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// (String) -Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + // (String) + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` -// (Boolean) -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // (Boolean) + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (Block List) (see below for nested schema) -OverrideVariant []OverrideVariantInitParameters `json:"overrideVariant,omitempty" tf:"override_variant,omitempty"` + // (Block List) (see below for nested schema) + OverrideVariant []OverrideVariantInitParameters `json:"overrideVariant,omitempty" tf:"override_variant,omitempty"` -// (String) -PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` + // (String) + PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` -// (Block List) (see below for nested schema) -SecurityRule []SecurityRuleInitParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"` + // (Block List) (see below for nested schema) + SecurityRule []SecurityRuleInitParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"` -// (String) -StyleJSON *string `json:"styleJson,omitempty" tf:"style_json,omitempty"` + // (String) + StyleJSON *string `json:"styleJson,omitempty" tf:"style_json,omitempty"` -// (Boolean) -TurnOffHostnameCheck *bool `json:"turnOffHostnameCheck,omitempty" tf:"turn_off_hostname_check,omitempty"` + // (Boolean) + TurnOffHostnameCheck *bool `json:"turnOffHostnameCheck,omitempty" tf:"turn_off_hostname_check,omitempty"` } - type CaptchaObservation struct { + // (List of String) + AllowedSites []*string `json:"allowedSites,omitempty" tf:"allowed_sites,omitempty"` -// (List of String) -AllowedSites []*string `json:"allowedSites,omitempty" tf:"allowed_sites,omitempty"` - -// (String) -ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` + // (String) + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` -// (String) -ClientKey *string `json:"clientKey,omitempty" tf:"client_key,omitempty"` + // (String) + ClientKey *string `json:"clientKey,omitempty" tf:"client_key,omitempty"` -// (String) -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // (String) + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// (String) -Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + // (String) + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` -// (String) -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // (String) + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// (Boolean) -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // (Boolean) + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// (String) -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // (String) + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// (String) The ID of this resource. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (Block List) (see below for nested schema) -OverrideVariant []OverrideVariantObservation `json:"overrideVariant,omitempty" tf:"override_variant,omitempty"` + // (Block List) (see below for nested schema) + OverrideVariant []OverrideVariantObservation `json:"overrideVariant,omitempty" tf:"override_variant,omitempty"` -// (String) -PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` + // (String) + PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` -// (Block List) (see below for nested schema) -SecurityRule []SecurityRuleObservation `json:"securityRule,omitempty" tf:"security_rule,omitempty"` + // (Block List) (see below for nested schema) + SecurityRule []SecurityRuleObservation `json:"securityRule,omitempty" tf:"security_rule,omitempty"` -// (String) -StyleJSON *string `json:"styleJson,omitempty" tf:"style_json,omitempty"` + // (String) + StyleJSON *string `json:"styleJson,omitempty" tf:"style_json,omitempty"` -// (Boolean) -Suspend *bool `json:"suspend,omitempty" tf:"suspend,omitempty"` + // (Boolean) + Suspend *bool `json:"suspend,omitempty" tf:"suspend,omitempty"` -// (Boolean) -TurnOffHostnameCheck *bool `json:"turnOffHostnameCheck,omitempty" tf:"turn_off_hostname_check,omitempty"` + // (Boolean) + TurnOffHostnameCheck *bool `json:"turnOffHostnameCheck,omitempty" tf:"turn_off_hostname_check,omitempty"` } - type CaptchaParameters struct { + // (List of String) + // +kubebuilder:validation:Optional + AllowedSites []*string `json:"allowedSites,omitempty" tf:"allowed_sites,omitempty"` -// (List of String) -// +kubebuilder:validation:Optional -AllowedSites []*string `json:"allowedSites,omitempty" tf:"allowed_sites,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` + // (String) + // +kubebuilder:validation:Optional + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` -// (String) -// +kubebuilder:validation:Optional -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // (String) + // +kubebuilder:validation:Optional + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` -// (String) -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // (String) + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. 
-// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// (String) -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -OverrideVariant []OverrideVariantParameters `json:"overrideVariant,omitempty" tf:"override_variant,omitempty"` + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + OverrideVariant []OverrideVariantParameters `json:"overrideVariant,omitempty" tf:"override_variant,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -SecurityRule []SecurityRuleParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"` + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + SecurityRule []SecurityRuleParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"` -// (String) -// +kubebuilder:validation:Optional -StyleJSON *string `json:"styleJson,omitempty" tf:"style_json,omitempty"` + // (String) + // +kubebuilder:validation:Optional + StyleJSON *string `json:"styleJson,omitempty" tf:"style_json,omitempty"` -// (Boolean) -// +kubebuilder:validation:Optional -TurnOffHostnameCheck *bool `json:"turnOffHostnameCheck,omitempty" tf:"turn_off_hostname_check,omitempty"` + // (Boolean) + // +kubebuilder:validation:Optional + TurnOffHostnameCheck *bool `json:"turnOffHostnameCheck,omitempty" tf:"turn_off_hostname_check,omitempty"` } - type ConditionInitParameters struct { + // (Block List) (see below for nested schema) + Headers []HeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` -// (Block List) (see below for nested schema) -Headers []HeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -Host []HostInitParameters `json:"host,omitempty" tf:"host,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Host []HostInitParameters `json:"host,omitempty" tf:"host,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -SourceIP []SourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + SourceIP []SourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -URI []URIInitParameters `json:"uri,omitempty" tf:"uri,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + URI []URIInitParameters `json:"uri,omitempty" tf:"uri,omitempty"` } 
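+// Illustrative manifest shape for a securityRule condition (values are
+// hypothetical; field names follow the JSON tags in this file):
+//
+//	securityRule:
+//	  - name: allow-main-host
+//	    priority: 10
+//	    condition:
+//	      - host:
+//	          - hosts:
+//	              - exactMatch: example.com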
- type ConditionObservation struct { + // (Block List) (see below for nested schema) + Headers []HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` -// (Block List) (see below for nested schema) -Headers []HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + Host []HostObservation `json:"host,omitempty" tf:"host,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Host []HostObservation `json:"host,omitempty" tf:"host,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + SourceIP []SourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -SourceIP []SourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -URI []URIObservation `json:"uri,omitempty" tf:"uri,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + URI []URIObservation `json:"uri,omitempty" tf:"uri,omitempty"` } - type ConditionParameters struct { + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + Headers []HeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -Headers []HeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Host []HostParameters `json:"host,omitempty" tf:"host,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Host []HostParameters `json:"host,omitempty" tf:"host,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + SourceIP []SourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -SourceIP []SourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -URI []URIParameters `json:"uri,omitempty" tf:"uri,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + URI []URIParameters `json:"uri,omitempty" tf:"uri,omitempty"` } - type GeoIPMatchInitParameters struct { - -// (List of String) -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // (List of String) + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPMatchObservation struct { - -// (List of String) -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // (List of String) + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPMatchParameters struct { - -// (List of String) -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPNotMatchInitParameters struct { - -// (List of String) -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // (List of String) + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPNotMatchObservation struct { - -// (List of String) -Locations []*string 
`json:"locations,omitempty" tf:"locations,omitempty"` + // (List of String) + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPNotMatchParameters struct { - -// (List of String) -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type HeadersInitParameters struct { + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// (Block List, Min: 1, Max: 1) (see below for nested schema) -Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + // (Block List, Min: 1, Max: 1) (see below for nested schema) + Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type HeadersObservation struct { + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// (Block List, Min: 1, Max: 1) (see below for nested schema) -Value []ValueObservation `json:"value,omitempty" tf:"value,omitempty"` + // (Block List, Min: 1, Max: 1) (see below for nested schema) + Value []ValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type HeadersParameters struct { + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// (Block List, Min: 1, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Value []ValueParameters `json:"value" tf:"value,omitempty"` + // (Block List, Min: 1, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Value []ValueParameters `json:"value" tf:"value,omitempty"` } - type HostInitParameters struct { - -// (Block List) (see below for nested schema) -Hosts []HostsInitParameters `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Block List) (see below for nested schema) + Hosts []HostsInitParameters `json:"hosts,omitempty" tf:"hosts,omitempty"` } - type HostObservation struct { - -// (Block List) (see below for nested schema) -Hosts []HostsObservation `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Block List) (see below for nested schema) + Hosts []HostsObservation `json:"hosts,omitempty" tf:"hosts,omitempty"` } - type HostParameters struct { - -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -Hosts []HostsParameters `json:"hosts,omitempty" tf:"hosts,omitempty"` + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + Hosts []HostsParameters `json:"hosts,omitempty" tf:"hosts,omitempty"` } - type HostsInitParameters struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// (String) -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -PireRegexNotMatch *string 
`json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // (String) + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HostsObservation struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// (String) -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // (String) + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HostsParameters struct { + // (String) + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type 
IPRangesMatchInitParameters struct { - -// (List of String) -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // (List of String) + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type IPRangesMatchObservation struct { - -// (List of String) -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // (List of String) + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type IPRangesMatchParameters struct { - -// (List of String) -// +kubebuilder:validation:Optional -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type IPRangesNotMatchInitParameters struct { - -// (List of String) -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // (List of String) + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type IPRangesNotMatchObservation struct { - -// (List of String) -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // (List of String) + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type IPRangesNotMatchParameters struct { - -// (List of String) -// +kubebuilder:validation:Optional -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // (List of String) + // +kubebuilder:validation:Optional + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type OverrideVariantInitParameters struct { + // (String) + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` -// (String) -ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` - -// (String) -Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + // (String) + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` -// (String) -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // (String) + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// (String) -PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` + // (String) + PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` -// (String) -UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` + // (String) + UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` } - type OverrideVariantObservation struct { + // (String) + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` -// (String) -ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` - -// (String) -Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + // (String) + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` -// (String) -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // (String) + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// (String) -PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` + // (String) + PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` -// (String) -UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` + // (String) + UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` } - type OverrideVariantParameters struct { + // (String) + // 
+kubebuilder:validation:Optional + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PreCheckType *string `json:"preCheckType,omitempty" tf:"pre_check_type,omitempty"` -// (String) -// +kubebuilder:validation:Optional -UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` + // (String) + // +kubebuilder:validation:Optional + UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` } - type PathInitParameters struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// (String) -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // (String) + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type PathObservation struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -PrefixMatch *string `json:"prefixMatch,omitempty" 
tf:"prefix_match,omitempty"` - -// (String) -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type PathParameters struct { + // (String) + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type QueriesInitParameters struct { + // (String) + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// (String) -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// (Block List, Min: 1, Max: 1) (see below for nested schema) -Value []QueriesValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + // (Block List, Min: 1, Max: 1) (see below for nested schema) + Value []QueriesValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type QueriesObservation struct { + // (String) + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// (String) -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -// (Block List, Min: 1, Max: 1) (see below for nested schema) -Value []QueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"` + // (Block List, Min: 1, Max: 1) (see below for nested schema) + Value []QueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type QueriesParameters struct { + // (String) + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Key *string `json:"key" tf:"key,omitempty"` - -// (Block List, Min: 1, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Value []QueriesValueParameters `json:"value" tf:"value,omitempty"` + // (Block List, Min: 1, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Value []QueriesValueParameters `json:"value" tf:"value,omitempty"` } - type QueriesValueInitParameters struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" 
tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// (String) -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type QueriesValueObservation struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// (String) -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// (String) -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // (String) + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type QueriesValueParameters struct { + // (String) + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// (String) -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" 
tf:"pire_regex_not_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// (String) -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // (String) + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type SecurityRuleInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` - -// (String) -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // (String) + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (String) -OverrideVariantUUID *string `json:"overrideVariantUuid,omitempty" tf:"override_variant_uuid,omitempty"` + // (String) + OverrideVariantUUID *string `json:"overrideVariantUuid,omitempty" tf:"override_variant_uuid,omitempty"` -// (Number) -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // (Number) + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` } - type SecurityRuleObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` - -// (String) -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // (String) + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// (String) -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (String) -OverrideVariantUUID *string `json:"overrideVariantUuid,omitempty" tf:"override_variant_uuid,omitempty"` + // (String) + OverrideVariantUUID *string `json:"overrideVariantUuid,omitempty" tf:"override_variant_uuid,omitempty"` -// (Number) -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // (Number) + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` } - type SecurityRuleParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Condition []ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Condition []ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// (String) -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// (String) -// 
+kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // (String) + // +kubebuilder:validation:Optional + OverrideVariantUUID *string `json:"overrideVariantUuid,omitempty" tf:"override_variant_uuid,omitempty"` -// (String) -// +kubebuilder:validation:Optional -OverrideVariantUUID *string `json:"overrideVariantUuid,omitempty" tf:"override_variant_uuid,omitempty"` - -// (Number) -// +kubebuilder:validation:Optional -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // (Number) + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` } - type SourceIPInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + GeoIPMatch []GeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -GeoIPMatch []GeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + GeoIPNotMatch []GeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -GeoIPNotMatch []GeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + IPRangesMatch []IPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -IPRangesMatch []IPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -IPRangesNotMatch []IPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + IPRangesNotMatch []IPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type SourceIPObservation struct { + // (Block List, Max: 1) (see below for nested schema) + GeoIPMatch []GeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -GeoIPMatch []GeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + GeoIPNotMatch []GeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -GeoIPNotMatch []GeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + IPRangesMatch []IPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -IPRangesMatch []IPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -IPRangesNotMatch []IPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + IPRangesNotMatch []IPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type SourceIPParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + GeoIPMatch []GeoIPMatchParameters `json:"geoIpMatch,omitempty" 
tf:"geo_ip_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -GeoIPMatch []GeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -GeoIPNotMatch []GeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + GeoIPNotMatch []GeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -IPRangesMatch []IPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + IPRangesMatch []IPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -IPRangesNotMatch []IPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + IPRangesNotMatch []IPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type URIInitParameters struct { + // (Block List, Max: 1) (see below for nested schema) + Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -// (Block List) (see below for nested schema) -Queries []QueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // (Block List) (see below for nested schema) + Queries []QueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type URIObservation struct { + // (Block List, Max: 1) (see below for nested schema) + Path []PathObservation `json:"path,omitempty" tf:"path,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -Path []PathObservation `json:"path,omitempty" tf:"path,omitempty"` - -// (Block List) (see below for nested schema) -Queries []QueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` + // (Block List) (see below for nested schema) + Queries []QueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` } - type URIParameters struct { + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Path []PathParameters `json:"path,omitempty" tf:"path,omitempty"` -// (Block List, Max: 1) (see below for nested schema) -// +kubebuilder:validation:Optional -Path []PathParameters `json:"path,omitempty" tf:"path,omitempty"` - -// (Block List) (see below for nested schema) -// +kubebuilder:validation:Optional -Queries []QueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // (Block List) (see below for nested schema) + // +kubebuilder:validation:Optional + Queries []QueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type ValueInitParameters struct { + // (String) + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// (String) -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // (String) + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// (String) 
-ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
+	// (String)
+	PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
 
-// (String)
-PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
+	// (String)
+	PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
 
-// (String)
-PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
+	// (String)
+	PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
 
-// (String)
-PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
-
-// (String)
-PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
+	// (String)
+	PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
 }
 
-
 type ValueObservation struct {
 
+	// (String)
+	ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
 
-// (String)
-ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
-
-// (String)
-ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
+	// (String)
+	ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
 
-// (String)
-PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
+	// (String)
+	PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
 
-// (String)
-PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
+	// (String)
+	PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
 
-// (String)
-PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
+	// (String)
+	PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
 
-// (String)
-PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
+	// (String)
+	PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
 }
 
-
 type ValueParameters struct {
 
+	// (String)
+	// +kubebuilder:validation:Optional
+	ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
-
-// (String)
-// +kubebuilder:validation:Optional
-ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
 
-// (String)
-// +kubebuilder:validation:Optional
-PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
+	// (String)
+	// +kubebuilder:validation:Optional
+	PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
 }
 
 // CaptchaSpec defines the desired state of Captcha
 type CaptchaSpec struct {
 	v1.ResourceSpec `json:",inline"`
-	ForProvider CaptchaParameters `json:"forProvider"`
+	ForProvider CaptchaParameters `json:"forProvider"`
 
 	// THIS IS A BETA FIELD. It will be honored
 	// unless the Management Policies feature flag is disabled.
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -963,21 +856,20 @@ type CaptchaSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because of an external controller is managing them, like an
 	// autoscaler.
-	InitProvider CaptchaInitParameters `json:"initProvider,omitempty"`
+	InitProvider CaptchaInitParameters `json:"initProvider,omitempty"`
 }
 
 // CaptchaStatus defines the observed state of Captcha.
 type CaptchaStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider CaptchaObservation `json:"atProvider,omitempty"`
+	AtProvider CaptchaObservation `json:"atProvider,omitempty"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
-// Captcha is the Schema for the Captchas API.
+// Captcha is the Schema for the Captchas API.
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
 // +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
diff --git a/apis/smartcaptcha/v1alpha1/zz_generated.conversion_hubs.go b/apis/smartcaptcha/v1alpha1/zz_generated.conversion_hubs.go
index d6f8a0b..76cb60f 100755
--- a/apis/smartcaptcha/v1alpha1/zz_generated.conversion_hubs.go
+++ b/apis/smartcaptcha/v1alpha1/zz_generated.conversion_hubs.go
@@ -1,10 +1,6 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
 
-
- // Hub marks this type as a conversion hub.
- func (tr *Captcha) Hub() {}
-
+// Hub marks this type as a conversion hub.
+func (tr *Captcha) Hub() {}
diff --git a/apis/smartcaptcha/v1alpha1/zz_generated.deepcopy.go b/apis/smartcaptcha/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..b4cc67a
--- /dev/null
+++ b/apis/smartcaptcha/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,2070 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"github.com/crossplane/crossplane-runtime/apis/common/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Captcha) DeepCopyInto(out *Captcha) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Captcha.
+func (in *Captcha) DeepCopy() *Captcha {
+	if in == nil {
+		return nil
+	}
+	out := new(Captcha)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Captcha) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CaptchaInitParameters) DeepCopyInto(out *CaptchaInitParameters) {
+	*out = *in
+	if in.AllowedSites != nil {
+		in, out := &in.AllowedSites, &out.AllowedSites
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ChallengeType != nil {
+		in, out := &in.ChallengeType, &out.ChallengeType
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Complexity != nil {
+		in, out := &in.Complexity, &out.Complexity
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OverrideVariant != nil {
+		in, out := &in.OverrideVariant, &out.OverrideVariant
+		*out = make([]OverrideVariantInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreCheckType != nil {
+		in, out := &in.PreCheckType, &out.PreCheckType
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityRule != nil {
+		in, out := &in.SecurityRule, &out.SecurityRule
+		*out = make([]SecurityRuleInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StyleJSON != nil {
+		in, out := &in.StyleJSON, &out.StyleJSON
+		*out = new(string)
+		**out = **in
+	}
+	if in.TurnOffHostnameCheck != nil {
+		in, out := &in.TurnOffHostnameCheck, &out.TurnOffHostnameCheck
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptchaInitParameters.
+func (in *CaptchaInitParameters) DeepCopy() *CaptchaInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CaptchaInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CaptchaList) DeepCopyInto(out *CaptchaList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Captcha, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptchaList.
+func (in *CaptchaList) DeepCopy() *CaptchaList {
+	if in == nil {
+		return nil
+	}
+	out := new(CaptchaList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CaptchaList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CaptchaObservation) DeepCopyInto(out *CaptchaObservation) {
+	*out = *in
+	if in.AllowedSites != nil {
+		in, out := &in.AllowedSites, &out.AllowedSites
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ChallengeType != nil {
+		in, out := &in.ChallengeType, &out.ChallengeType
+		*out = new(string)
+		**out = **in
+	}
+	if in.ClientKey != nil {
+		in, out := &in.ClientKey, &out.ClientKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Complexity != nil {
+		in, out := &in.Complexity, &out.Complexity
+		*out = new(string)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OverrideVariant != nil {
+		in, out := &in.OverrideVariant, &out.OverrideVariant
+		*out = make([]OverrideVariantObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreCheckType != nil {
+		in, out := &in.PreCheckType, &out.PreCheckType
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityRule != nil {
+		in, out := &in.SecurityRule, &out.SecurityRule
+		*out = make([]SecurityRuleObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StyleJSON != nil {
+		in, out := &in.StyleJSON, &out.StyleJSON
+		*out = new(string)
+		**out = **in
+	}
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(bool)
+		**out = **in
+	}
+	if in.TurnOffHostnameCheck != nil {
+		in, out := &in.TurnOffHostnameCheck, &out.TurnOffHostnameCheck
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptchaObservation.
+func (in *CaptchaObservation) DeepCopy() *CaptchaObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CaptchaObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CaptchaParameters) DeepCopyInto(out *CaptchaParameters) {
+	*out = *in
+	if in.AllowedSites != nil {
+		in, out := &in.AllowedSites, &out.AllowedSites
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.ChallengeType != nil {
+		in, out := &in.ChallengeType, &out.ChallengeType
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Complexity != nil {
+		in, out := &in.Complexity, &out.Complexity
+		*out = new(string)
+		**out = **in
+	}
+	if in.DeletionProtection != nil {
+		in, out := &in.DeletionProtection, &out.DeletionProtection
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.OverrideVariant != nil {
+		in, out := &in.OverrideVariant, &out.OverrideVariant
+		*out = make([]OverrideVariantParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreCheckType != nil {
+		in, out := &in.PreCheckType, &out.PreCheckType
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityRule != nil {
+		in, out := &in.SecurityRule, &out.SecurityRule
+		*out = make([]SecurityRuleParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StyleJSON != nil {
+		in, out := &in.StyleJSON, &out.StyleJSON
+		*out = new(string)
+		**out = **in
+	}
+	if in.TurnOffHostnameCheck != nil {
+		in, out := &in.TurnOffHostnameCheck, &out.TurnOffHostnameCheck
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptchaParameters.
+func (in *CaptchaParameters) DeepCopy() *CaptchaParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(CaptchaParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CaptchaSpec) DeepCopyInto(out *CaptchaSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptchaSpec.
+func (in *CaptchaSpec) DeepCopy() *CaptchaSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CaptchaSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CaptchaStatus) DeepCopyInto(out *CaptchaStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptchaStatus.
+func (in *CaptchaStatus) DeepCopy() *CaptchaStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CaptchaStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) {
+	*out = *in
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]HeadersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]HostInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]SourceIPInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = make([]URIInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters.
+func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) {
+	*out = *in
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]HeadersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]HostObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]SourceIPObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = make([]URIObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation.
+func (in *ConditionObservation) DeepCopy() *ConditionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) {
+	*out = *in
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]HeadersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Host != nil {
+		in, out := &in.Host, &out.Host
+		*out = make([]HostParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]SourceIPParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = make([]URIParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters.
+func (in *ConditionParameters) DeepCopy() *ConditionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPMatchInitParameters) DeepCopyInto(out *GeoIPMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPMatchInitParameters.
+func (in *GeoIPMatchInitParameters) DeepCopy() *GeoIPMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPMatchObservation) DeepCopyInto(out *GeoIPMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPMatchObservation.
+func (in *GeoIPMatchObservation) DeepCopy() *GeoIPMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPMatchParameters) DeepCopyInto(out *GeoIPMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPMatchParameters.
+func (in *GeoIPMatchParameters) DeepCopy() *GeoIPMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPNotMatchInitParameters) DeepCopyInto(out *GeoIPNotMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPNotMatchInitParameters.
+func (in *GeoIPNotMatchInitParameters) DeepCopy() *GeoIPNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPNotMatchObservation) DeepCopyInto(out *GeoIPNotMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPNotMatchObservation.
+func (in *GeoIPNotMatchObservation) DeepCopy() *GeoIPNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPNotMatchParameters) DeepCopyInto(out *GeoIPNotMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPNotMatchParameters.
+func (in *GeoIPNotMatchParameters) DeepCopy() *GeoIPNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersInitParameters) DeepCopyInto(out *HeadersInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersInitParameters.
+func (in *HeadersInitParameters) DeepCopy() *HeadersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersObservation) DeepCopyInto(out *HeadersObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersObservation.
+func (in *HeadersObservation) DeepCopy() *HeadersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersParameters) DeepCopyInto(out *HeadersParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersParameters.
+func (in *HeadersParameters) DeepCopy() *HeadersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostInitParameters) DeepCopyInto(out *HostInitParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]HostsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostInitParameters.
+func (in *HostInitParameters) DeepCopy() *HostInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HostInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostObservation) DeepCopyInto(out *HostObservation) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]HostsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostObservation.
+func (in *HostObservation) DeepCopy() *HostObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HostObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostParameters) DeepCopyInto(out *HostParameters) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]HostsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostParameters.
+func (in *HostParameters) DeepCopy() *HostParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HostParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostsInitParameters) DeepCopyInto(out *HostsInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostsInitParameters.
+func (in *HostsInitParameters) DeepCopy() *HostsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HostsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostsObservation) DeepCopyInto(out *HostsObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostsObservation.
+func (in *HostsObservation) DeepCopy() *HostsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HostsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostsParameters) DeepCopyInto(out *HostsParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostsParameters.
+func (in *HostsParameters) DeepCopy() *HostsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HostsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesMatchInitParameters) DeepCopyInto(out *IPRangesMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesMatchInitParameters.
+func (in *IPRangesMatchInitParameters) DeepCopy() *IPRangesMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesMatchObservation) DeepCopyInto(out *IPRangesMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesMatchObservation.
+func (in *IPRangesMatchObservation) DeepCopy() *IPRangesMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesMatchParameters) DeepCopyInto(out *IPRangesMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesMatchParameters.
+func (in *IPRangesMatchParameters) DeepCopy() *IPRangesMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesNotMatchInitParameters) DeepCopyInto(out *IPRangesNotMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesNotMatchInitParameters.
+func (in *IPRangesNotMatchInitParameters) DeepCopy() *IPRangesNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesNotMatchObservation) DeepCopyInto(out *IPRangesNotMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesNotMatchObservation.
+func (in *IPRangesNotMatchObservation) DeepCopy() *IPRangesNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesNotMatchParameters) DeepCopyInto(out *IPRangesNotMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesNotMatchParameters.
+func (in *IPRangesNotMatchParameters) DeepCopy() *IPRangesNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OverrideVariantInitParameters) DeepCopyInto(out *OverrideVariantInitParameters) {
+	*out = *in
+	if in.ChallengeType != nil {
+		in, out := &in.ChallengeType, &out.ChallengeType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Complexity != nil {
+		in, out := &in.Complexity, &out.Complexity
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.PreCheckType != nil {
+		in, out := &in.PreCheckType, &out.PreCheckType
+		*out = new(string)
+		**out = **in
+	}
+	if in.UUID != nil {
+		in, out := &in.UUID, &out.UUID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideVariantInitParameters.
+func (in *OverrideVariantInitParameters) DeepCopy() *OverrideVariantInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OverrideVariantInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OverrideVariantObservation) DeepCopyInto(out *OverrideVariantObservation) {
+	*out = *in
+	if in.ChallengeType != nil {
+		in, out := &in.ChallengeType, &out.ChallengeType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Complexity != nil {
+		in, out := &in.Complexity, &out.Complexity
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.PreCheckType != nil {
+		in, out := &in.PreCheckType, &out.PreCheckType
+		*out = new(string)
+		**out = **in
+	}
+	if in.UUID != nil {
+		in, out := &in.UUID, &out.UUID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideVariantObservation.
+func (in *OverrideVariantObservation) DeepCopy() *OverrideVariantObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(OverrideVariantObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OverrideVariantParameters) DeepCopyInto(out *OverrideVariantParameters) {
+	*out = *in
+	if in.ChallengeType != nil {
+		in, out := &in.ChallengeType, &out.ChallengeType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Complexity != nil {
+		in, out := &in.Complexity, &out.Complexity
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.PreCheckType != nil {
+		in, out := &in.PreCheckType, &out.PreCheckType
+		*out = new(string)
+		**out = **in
+	}
+	if in.UUID != nil {
+		in, out := &in.UUID, &out.UUID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideVariantParameters.
+func (in *OverrideVariantParameters) DeepCopy() *OverrideVariantParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OverrideVariantParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathInitParameters) DeepCopyInto(out *PathInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathInitParameters.
+func (in *PathInitParameters) DeepCopy() *PathInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PathInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathObservation) DeepCopyInto(out *PathObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathObservation.
+func (in *PathObservation) DeepCopy() *PathObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PathObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathParameters) DeepCopyInto(out *PathParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathParameters. +func (in *PathParameters) DeepCopy() *PathParameters { + if in == nil { + return nil + } + out := new(PathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesInitParameters) DeepCopyInto(out *QueriesInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]QueriesValueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesInitParameters. +func (in *QueriesInitParameters) DeepCopy() *QueriesInitParameters { + if in == nil { + return nil + } + out := new(QueriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesObservation) DeepCopyInto(out *QueriesObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]QueriesValueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesObservation. +func (in *QueriesObservation) DeepCopy() *QueriesObservation { + if in == nil { + return nil + } + out := new(QueriesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesParameters) DeepCopyInto(out *QueriesParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]QueriesValueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesParameters. +func (in *QueriesParameters) DeepCopy() *QueriesParameters { + if in == nil { + return nil + } + out := new(QueriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueriesValueInitParameters) DeepCopyInto(out *QueriesValueInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesValueInitParameters. +func (in *QueriesValueInitParameters) DeepCopy() *QueriesValueInitParameters { + if in == nil { + return nil + } + out := new(QueriesValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesValueObservation) DeepCopyInto(out *QueriesValueObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesValueObservation. +func (in *QueriesValueObservation) DeepCopy() *QueriesValueObservation { + if in == nil { + return nil + } + out := new(QueriesValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueriesValueParameters) DeepCopyInto(out *QueriesValueParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesValueParameters. 
+func (in *QueriesValueParameters) DeepCopy() *QueriesValueParameters { + if in == nil { + return nil + } + out := new(QueriesValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityRuleInitParameters) DeepCopyInto(out *SecurityRuleInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverrideVariantUUID != nil { + in, out := &in.OverrideVariantUUID, &out.OverrideVariantUUID + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityRuleInitParameters. +func (in *SecurityRuleInitParameters) DeepCopy() *SecurityRuleInitParameters { + if in == nil { + return nil + } + out := new(SecurityRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityRuleObservation) DeepCopyInto(out *SecurityRuleObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverrideVariantUUID != nil { + in, out := &in.OverrideVariantUUID, &out.OverrideVariantUUID + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityRuleObservation. +func (in *SecurityRuleObservation) DeepCopy() *SecurityRuleObservation { + if in == nil { + return nil + } + out := new(SecurityRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityRuleParameters) DeepCopyInto(out *SecurityRuleParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverrideVariantUUID != nil { + in, out := &in.OverrideVariantUUID, &out.OverrideVariantUUID + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityRuleParameters. 
+func (in *SecurityRuleParameters) DeepCopy() *SecurityRuleParameters { + if in == nil { + return nil + } + out := new(SecurityRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceIPInitParameters) DeepCopyInto(out *SourceIPInitParameters) { + *out = *in + if in.GeoIPMatch != nil { + in, out := &in.GeoIPMatch, &out.GeoIPMatch + *out = make([]GeoIPMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoIPNotMatch != nil { + in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch + *out = make([]GeoIPNotMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesMatch != nil { + in, out := &in.IPRangesMatch, &out.IPRangesMatch + *out = make([]IPRangesMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesNotMatch != nil { + in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch + *out = make([]IPRangesNotMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPInitParameters. +func (in *SourceIPInitParameters) DeepCopy() *SourceIPInitParameters { + if in == nil { + return nil + } + out := new(SourceIPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceIPObservation) DeepCopyInto(out *SourceIPObservation) { + *out = *in + if in.GeoIPMatch != nil { + in, out := &in.GeoIPMatch, &out.GeoIPMatch + *out = make([]GeoIPMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoIPNotMatch != nil { + in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch + *out = make([]GeoIPNotMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesMatch != nil { + in, out := &in.IPRangesMatch, &out.IPRangesMatch + *out = make([]IPRangesMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesNotMatch != nil { + in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch + *out = make([]IPRangesNotMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPObservation. +func (in *SourceIPObservation) DeepCopy() *SourceIPObservation { + if in == nil { + return nil + } + out := new(SourceIPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceIPParameters) DeepCopyInto(out *SourceIPParameters) { + *out = *in + if in.GeoIPMatch != nil { + in, out := &in.GeoIPMatch, &out.GeoIPMatch + *out = make([]GeoIPMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoIPNotMatch != nil { + in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch + *out = make([]GeoIPNotMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesMatch != nil { + in, out := &in.IPRangesMatch, &out.IPRangesMatch + *out = make([]IPRangesMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesNotMatch != nil { + in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch + *out = make([]IPRangesNotMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPParameters. +func (in *SourceIPParameters) DeepCopy() *SourceIPParameters { + if in == nil { + return nil + } + out := new(SourceIPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URIInitParameters) DeepCopyInto(out *URIInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]PathInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]QueriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URIInitParameters. +func (in *URIInitParameters) DeepCopy() *URIInitParameters { + if in == nil { + return nil + } + out := new(URIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URIObservation) DeepCopyInto(out *URIObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]PathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]QueriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URIObservation. +func (in *URIObservation) DeepCopy() *URIObservation { + if in == nil { + return nil + } + out := new(URIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URIParameters) DeepCopyInto(out *URIParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]PathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]QueriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URIParameters. 
+func (in *URIParameters) DeepCopy() *URIParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(URIParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueInitParameters) DeepCopyInto(out *ValueInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueInitParameters.
+func (in *ValueInitParameters) DeepCopy() *ValueInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueObservation) DeepCopyInto(out *ValueObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueObservation.
+func (in *ValueObservation) DeepCopy() *ValueObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueObservation)
+	in.DeepCopyInto(out)
+	return out
+}
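Every generated DeepCopyInto above follows the same shape: copy the struct by value, then re-allocate each non-nil pointer field so the copy shares no memory with the receiver. Below is a minimal, self-contained miniature of that pattern (the Match type and its field are illustrative, not part of this provider) showing the isolation the generated code guarantees:

package main

import "fmt"

// Match mirrors the shape of the generated *Parameters types:
// optional fields are pointers.
type Match struct {
	ExactMatch *string
}

// DeepCopyInto re-allocates every non-nil pointer field, in the same
// style as the generated functions above, so out shares no memory with in.
func (in *Match) DeepCopyInto(out *Match) {
	*out = *in
	if in.ExactMatch != nil {
		in, out := &in.ExactMatch, &out.ExactMatch
		*out = new(string)
		**out = **in
	}
}

// DeepCopy allocates a new Match and copies the receiver into it.
func (in *Match) DeepCopy() *Match {
	if in == nil {
		return nil
	}
	out := new(Match)
	in.DeepCopyInto(out)
	return out
}

func main() {
	s := "foo"
	a := Match{ExactMatch: &s}
	b := a.DeepCopy()

	*b.ExactMatch = "bar"                     // mutate the copy
	fmt.Println(*a.ExactMatch, *b.ExactMatch) // foo bar: the original is untouched
}

The in, out := &in.Field, &out.Field shadowing looks odd in isolation, but it keeps every per-field block identical regardless of the field's name, which is what makes this code mechanically generatable.

+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.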
+func (in *ValueParameters) DeepCopyInto(out *ValueParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueParameters. +func (in *ValueParameters) DeepCopy() *ValueParameters { + if in == nil { + return nil + } + out := new(ValueParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/smartcaptcha/v1alpha1/zz_generated.resolvers.go b/apis/smartcaptcha/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..69c287f --- /dev/null +++ b/apis/smartcaptcha/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Captcha. +func (mg *Captcha) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/smartcaptcha/v1alpha1/zz_groupversion_info.go b/apis/smartcaptcha/v1alpha1/zz_groupversion_info.go index e729f10..f028a9c 100755 --- a/apis/smartcaptcha/v1alpha1/zz_groupversion_info.go +++ b/apis/smartcaptcha/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
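The zz_generated.resolvers.go file added above is what turns spec.forProvider.folderIdRef and folderIdSelector into a concrete folderId at reconcile time. A hypothetical sketch of populating such a reference from Go (the Captcha type comes from this repository; the folder name and everything else here is illustrative):

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/smartcaptcha/v1alpha1"
)

func main() {
	c := &v1alpha1.Captcha{}
	// Reference a resourcemanager Folder by name instead of hard-coding
	// its ID; ResolveReferences copies the Folder's external name into
	// spec.forProvider.folderId during reconciliation.
	c.Spec.ForProvider.FolderIDRef = &xpv1.Reference{Name: "example-folder"}
	fmt.Println(c.Spec.ForProvider.FolderID == nil) // true until resolved
}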
 // +kubebuilder:object:generate=true
diff --git a/apis/storage/v1alpha1/zz_bucket_terraformed.go b/apis/storage/v1alpha1/zz_bucket_terraformed.go
new file mode 100755
index 0000000..5698126
--- /dev/null
+++ b/apis/storage/v1alpha1/zz_bucket_terraformed.go
@@ -0,0 +1,129 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Bucket
+func (mg *Bucket) GetTerraformResourceType() string {
+	return "yandex_storage_bucket"
+}
+
+// GetConnectionDetailsMapping for this Bucket
+func (tr *Bucket) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"secret_key": "secretKeySecretRef"}
+}
+
+// GetObservation of this Bucket
+func (tr *Bucket) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Bucket
+func (tr *Bucket) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Bucket
+func (tr *Bucket) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Bucket
+func (tr *Bucket) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Bucket
+func (tr *Bucket) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Bucket
+func (tr *Bucket) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Bucket
+func (tr *Bucket) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
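The Note(lsviben) comment above is the only place the merge semantics are spelled out, so here is a minimal runnable sketch of the same mergo call in isolation (the two maps are illustrative stand-ins for the GetParameters and GetInitParameters results):

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// Stand-ins for the spec.forProvider and spec.initProvider parameter maps.
	params := map[string]any{"bucket": "from-for-provider"}
	initParams := map[string]any{"bucket": "from-init-provider", "max_size": 1024}

	// Same options as GetMergedParameters above: WithSliceDeepCopy enables
	// deep copying of slices (and, as a side effect, Overwrite), and the
	// extra option forces Overwrite back to false so forProvider wins on
	// conflicting keys.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	fmt.Println(params["bucket"])   // from-for-provider: the conflicting key keeps forProvider's value
	fmt.Println(params["max_size"]) // 1024: the missing key is filled from initProvider
}

In other words, spec.initProvider can only late-fill values the user left unset in spec.forProvider; it can never override them.

+
+// LateInitialize this Bucket using its observed tfState.
+// returns True if there are any spec changes for the resource.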
+func (tr *Bucket) LateInitialize(attrs []byte) (bool, error) { + params := &BucketParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AnonymousAccessFlags")) + opts = append(opts, resource.WithNameFilter("AnonymousAccessFlags.ConfigRead")) + opts = append(opts, resource.WithNameFilter("AnonymousAccessFlags.List")) + opts = append(opts, resource.WithNameFilter("AnonymousAccessFlags.Read")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Bucket) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/storage/v1alpha1/zz_bucket_types.go b/apis/storage/v1alpha1/zz_bucket_types.go new file mode 100755 index 0000000..9c42098 --- /dev/null +++ b/apis/storage/v1alpha1/zz_bucket_types.go @@ -0,0 +1,1140 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AndInitParameters struct { + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AndObservation struct { + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AndParameters struct { + + // +kubebuilder:validation:Optional + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // +kubebuilder:validation:Optional + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AnonymousAccessFlagsInitParameters struct { + + // Allows to read objects in bucket anonymously. + ConfigRead *bool `json:"configRead,omitempty" tf:"config_read,omitempty"` + + // Allows to list object in bucket anonymously. + List *bool `json:"list,omitempty" tf:"list,omitempty"` + + // Allows to read objects in bucket anonymously. 
+ Read *bool `json:"read,omitempty" tf:"read,omitempty"` +} + +type AnonymousAccessFlagsObservation struct { + + // Allows to read objects in bucket anonymously. + ConfigRead *bool `json:"configRead,omitempty" tf:"config_read,omitempty"` + + // Allows to list object in bucket anonymously. + List *bool `json:"list,omitempty" tf:"list,omitempty"` + + // Allows to read objects in bucket anonymously. + Read *bool `json:"read,omitempty" tf:"read,omitempty"` +} + +type AnonymousAccessFlagsParameters struct { + + // Allows to read objects in bucket anonymously. + // +kubebuilder:validation:Optional + ConfigRead *bool `json:"configRead,omitempty" tf:"config_read,omitempty"` + + // Allows to list object in bucket anonymously. + // +kubebuilder:validation:Optional + List *bool `json:"list,omitempty" tf:"list,omitempty"` + + // Allows to read objects in bucket anonymously. + // +kubebuilder:validation:Optional + Read *bool `json:"read,omitempty" tf:"read,omitempty"` +} + +type ApplyServerSideEncryptionByDefaultInitParameters struct { + + // The KMS master key ID used for the SSE-KMS encryption. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // Reference to a SymmetricKey in kms to populate kmsMasterKeyId. + // +kubebuilder:validation:Optional + KMSMasterKeyIDRef *v1.Reference `json:"kmsMasterKeyIdRef,omitempty" tf:"-"` + + // Selector for a SymmetricKey in kms to populate kmsMasterKeyId. + // +kubebuilder:validation:Optional + KMSMasterKeyIDSelector *v1.Selector `json:"kmsMasterKeyIdSelector,omitempty" tf:"-"` + + // The server-side encryption algorithm to use. Single valid value is aws:kms + SseAlgorithm *string `json:"sseAlgorithm,omitempty" tf:"sse_algorithm,omitempty"` +} + +type ApplyServerSideEncryptionByDefaultObservation struct { + + // The KMS master key ID used for the SSE-KMS encryption. + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // The server-side encryption algorithm to use. Single valid value is aws:kms + SseAlgorithm *string `json:"sseAlgorithm,omitempty" tf:"sse_algorithm,omitempty"` +} + +type ApplyServerSideEncryptionByDefaultParameters struct { + + // The KMS master key ID used for the SSE-KMS encryption. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1.SymmetricKey + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // Reference to a SymmetricKey in kms to populate kmsMasterKeyId. + // +kubebuilder:validation:Optional + KMSMasterKeyIDRef *v1.Reference `json:"kmsMasterKeyIdRef,omitempty" tf:"-"` + + // Selector for a SymmetricKey in kms to populate kmsMasterKeyId. + // +kubebuilder:validation:Optional + KMSMasterKeyIDSelector *v1.Selector `json:"kmsMasterKeyIdSelector,omitempty" tf:"-"` + + // The server-side encryption algorithm to use. Single valid value is aws:kms + // +kubebuilder:validation:Optional + SseAlgorithm *string `json:"sseAlgorithm" tf:"sse_algorithm,omitempty"` +} + +type BucketInitParameters struct { + + // The predefined ACL to apply. Defaults to private. Conflicts with grant. 
+	ACL *string `json:"acl,omitempty" tf:"acl,omitempty"`
+
+	// The access key to use when applying changes. If omitted, the storage_access_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccountStaticAccessKey
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractAccessKey()
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Reference to a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeyRef *v1.Reference `json:"accessKeyRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeySelector *v1.Selector `json:"accessKeySelector,omitempty" tf:"-"`
+
+	// Provides various access to objects. See bucket availability for more information.
+	AnonymousAccessFlags []AnonymousAccessFlagsInitParameters `json:"anonymousAccessFlags,omitempty" tf:"anonymous_access_flags,omitempty"`
+
+	// The name of the bucket.
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+
+	// Creates a unique bucket name beginning with the specified prefix. Conflicts with bucket.
+	BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"`
+
+	// A rule of Cross-Origin Resource Sharing (documented below).
+	CorsRule []CorsRuleInitParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"`
+
+	// Storage class which is used for storing objects by default. Available values are: "STANDARD", "COLD", "ICE". Default is "STANDARD". See storage class for more information.
+	DefaultStorageClass *string `json:"defaultStorageClass,omitempty" tf:"default_storage_class,omitempty"`
+
+	// Allows creating the bucket in a different folder.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable.
+	ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"`
+
+	// An ACL policy grant. Conflicts with acl.
+	Grant []GrantInitParameters `json:"grant,omitempty" tf:"grant,omitempty"`
+
+	// Manages HTTPS certificates for the bucket. See https for more information.
+	HTTPS []HTTPSInitParameters `json:"https,omitempty" tf:"https,omitempty"`
+
+	// A configuration of object lifecycle management (documented below).
+	LifecycleRule []LifecycleRuleInitParameters `json:"lifecycleRule,omitempty" tf:"lifecycle_rule,omitempty"`
+
+	// Settings of bucket logging (documented below).
+	Logging []LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"`
+
+	// The size of bucket, in bytes. See size limiting for more information.
+	MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+	// A configuration of object lock management (documented below).
+	ObjectLockConfiguration []ObjectLockConfigurationInitParameters `json:"objectLockConfiguration,omitempty" tf:"object_lock_configuration,omitempty"`
+
+	Policy *string `json:"policy,omitempty" tf:"policy,omitempty"`
+
+	// The secret key to use when applying changes. If omitted, the storage_secret_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	SecretKeySecretRef *v1.SecretKeySelector `json:"secretKeySecretRef,omitempty" tf:"-"`
+
+	// A configuration of server-side encryption for the bucket (documented below)
+	ServerSideEncryptionConfiguration []ServerSideEncryptionConfigurationInitParameters `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"`
+
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A state of versioning (documented below)
+	Versioning []VersioningInitParameters `json:"versioning,omitempty" tf:"versioning,omitempty"`
+
+	// A website object (documented below).
+	Website []WebsiteInitParameters `json:"website,omitempty" tf:"website,omitempty"`
+
+	// The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
+	WebsiteDomain *string `json:"websiteDomain,omitempty" tf:"website_domain,omitempty"`
+
+	// The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
+	WebsiteEndpoint *string `json:"websiteEndpoint,omitempty" tf:"website_endpoint,omitempty"`
+}
+
+type BucketObservation struct {
+
+	// The predefined ACL to apply. Defaults to private. Conflicts with grant.
+	ACL *string `json:"acl,omitempty" tf:"acl,omitempty"`
+
+	// The access key to use when applying changes. If omitted, the storage_access_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Provides various access to objects. See bucket availability for more information.
+	AnonymousAccessFlags []AnonymousAccessFlagsObservation `json:"anonymousAccessFlags,omitempty" tf:"anonymous_access_flags,omitempty"`
+
+	// The name of the bucket.
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+
+	// The bucket domain name.
+	BucketDomainName *string `json:"bucketDomainName,omitempty" tf:"bucket_domain_name,omitempty"`
+
+	// Creates a unique bucket name beginning with the specified prefix. Conflicts with bucket.
+	BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"`
+
+	// A rule of Cross-Origin Resource Sharing (documented below).
+	CorsRule []CorsRuleObservation `json:"corsRule,omitempty" tf:"cors_rule,omitempty"`
+
+	// Storage class which is used for storing objects by default. Available values are: "STANDARD", "COLD", "ICE". Default is "STANDARD". See storage class for more information.
+	DefaultStorageClass *string `json:"defaultStorageClass,omitempty" tf:"default_storage_class,omitempty"`
+
+	// Allows creating the bucket in a different folder.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable.
+	ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"`
+
+	// An ACL policy grant. Conflicts with acl.
+	Grant []GrantObservation `json:"grant,omitempty" tf:"grant,omitempty"`
+
+	// Manages HTTPS certificates for the bucket. See https for more information.
+	HTTPS []HTTPSObservation `json:"https,omitempty" tf:"https,omitempty"`
+
+	// The ID of the bucket.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// A configuration of object lifecycle management (documented below).
+	LifecycleRule []LifecycleRuleObservation `json:"lifecycleRule,omitempty" tf:"lifecycle_rule,omitempty"`
+
+	// Settings of bucket logging (documented below).
+	Logging []LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"`
+
+	// The size of bucket, in bytes. See size limiting for more information.
+	MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+	// A configuration of object lock management (documented below).
+	ObjectLockConfiguration []ObjectLockConfigurationObservation `json:"objectLockConfiguration,omitempty" tf:"object_lock_configuration,omitempty"`
+
+	Policy *string `json:"policy,omitempty" tf:"policy,omitempty"`
+
+	// A configuration of server-side encryption for the bucket (documented below)
+	ServerSideEncryptionConfiguration []ServerSideEncryptionConfigurationObservation `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"`
+
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A state of versioning (documented below)
+	Versioning []VersioningObservation `json:"versioning,omitempty" tf:"versioning,omitempty"`
+
+	// A website object (documented below).
+	Website []WebsiteObservation `json:"website,omitempty" tf:"website,omitempty"`
+
+	// The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
+	WebsiteDomain *string `json:"websiteDomain,omitempty" tf:"website_domain,omitempty"`
+
+	// The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
+	WebsiteEndpoint *string `json:"websiteEndpoint,omitempty" tf:"website_endpoint,omitempty"`
+}
+
+type BucketParameters struct {
+
+	// The predefined ACL to apply. Defaults to private. Conflicts with grant.
+	// +kubebuilder:validation:Optional
+	ACL *string `json:"acl,omitempty" tf:"acl,omitempty"`
+
+	// The access key to use when applying changes. If omitted, the storage_access_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccountStaticAccessKey
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractAccessKey()
+	// +kubebuilder:validation:Optional
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Reference to a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeyRef *v1.Reference `json:"accessKeyRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeySelector *v1.Selector `json:"accessKeySelector,omitempty" tf:"-"`
+
+	// Provides various access to objects. See bucket availability for more information.
+	// +kubebuilder:validation:Optional
+	AnonymousAccessFlags []AnonymousAccessFlagsParameters `json:"anonymousAccessFlags,omitempty" tf:"anonymous_access_flags,omitempty"`
+
+	// The name of the bucket.
+	// +kubebuilder:validation:Optional
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+
+	// Creates a unique bucket name beginning with the specified prefix. Conflicts with bucket.
+	// +kubebuilder:validation:Optional
+	BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"`
+
+	// A rule of Cross-Origin Resource Sharing (documented below).
+	// +kubebuilder:validation:Optional
+	CorsRule []CorsRuleParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"`
+
+	// Storage class which is used for storing objects by default. Available values are: "STANDARD", "COLD", "ICE". Default is "STANDARD". See storage class for more information.
+	// +kubebuilder:validation:Optional
+	DefaultStorageClass *string `json:"defaultStorageClass,omitempty" tf:"default_storage_class,omitempty"`
+
+	// Allows creating the bucket in a different folder.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable.
+	// +kubebuilder:validation:Optional
+	ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"`
+
+	// An ACL policy grant. Conflicts with acl.
+	// +kubebuilder:validation:Optional
+	Grant []GrantParameters `json:"grant,omitempty" tf:"grant,omitempty"`
+
+	// Manages HTTPS certificates for the bucket. See https for more information.
+	// +kubebuilder:validation:Optional
+	HTTPS []HTTPSParameters `json:"https,omitempty" tf:"https,omitempty"`
+
+	// A configuration of object lifecycle management (documented below).
+	// +kubebuilder:validation:Optional
+	LifecycleRule []LifecycleRuleParameters `json:"lifecycleRule,omitempty" tf:"lifecycle_rule,omitempty"`
+
+	// Settings of bucket logging (documented below).
+	// +kubebuilder:validation:Optional
+	Logging []LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"`
+
+	// The size of bucket, in bytes. See size limiting for more information.
+	// +kubebuilder:validation:Optional
+	MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"`
+
+	// A configuration of object lock management (documented below).
+	// +kubebuilder:validation:Optional
+	ObjectLockConfiguration []ObjectLockConfigurationParameters `json:"objectLockConfiguration,omitempty" tf:"object_lock_configuration,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	Policy *string `json:"policy,omitempty" tf:"policy,omitempty"`
+
+	// The secret key to use when applying changes. If omitted, the storage_secret_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+ // +kubebuilder:validation:Optional + SecretKeySecretRef *v1.SecretKeySelector `json:"secretKeySecretRef,omitempty" tf:"-"` + + // A configuration of server-side encryption for the bucket (documented below) + // +kubebuilder:validation:Optional + ServerSideEncryptionConfiguration []ServerSideEncryptionConfigurationParameters `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"` + + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A state of versioning (documented below) + // +kubebuilder:validation:Optional + Versioning []VersioningParameters `json:"versioning,omitempty" tf:"versioning,omitempty"` + + // A website object (documented below). + // +kubebuilder:validation:Optional + Website []WebsiteParameters `json:"website,omitempty" tf:"website,omitempty"` + + // The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. + // +kubebuilder:validation:Optional + WebsiteDomain *string `json:"websiteDomain,omitempty" tf:"website_domain,omitempty"` + + // The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. + // +kubebuilder:validation:Optional + WebsiteEndpoint *string `json:"websiteEndpoint,omitempty" tf:"website_endpoint,omitempty"` +} + +type CorsRuleInitParameters struct { + + // Specifies which headers are allowed. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // Specifies which methods are allowed. Can be GET, PUT, POST, DELETE or HEAD. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Specifies which origins are allowed. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Specifies expose header in the response. + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Specifies time in seconds that browser can cache the response for a preflight request. + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type CorsRuleObservation struct { + + // Specifies which headers are allowed. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // Specifies which methods are allowed. Can be GET, PUT, POST, DELETE or HEAD. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Specifies which origins are allowed. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Specifies expose header in the response. + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Specifies time in seconds that browser can cache the response for a preflight request. + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type CorsRuleParameters struct { + + // Specifies which headers are allowed. + // +kubebuilder:validation:Optional + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // Specifies which methods are allowed. Can be GET, PUT, POST, DELETE or HEAD. + // +kubebuilder:validation:Optional + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // Specifies which origins are allowed. 
+ // +kubebuilder:validation:Optional + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // Specifies expose header in the response. + // +kubebuilder:validation:Optional + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Specifies time in seconds that browser can cache the response for a preflight request. + // +kubebuilder:validation:Optional + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type DefaultRetentionInitParameters struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies a type of object lock. One of ["GOVERNANCE", "COMPLIANCE"]. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Specifies a retention period in years after uploading an object version. It must be a positive integer. You can't set it simultaneously with days. + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +type DefaultRetentionObservation struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies a type of object lock. One of ["GOVERNANCE", "COMPLIANCE"]. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Specifies a retention period in years after uploading an object version. It must be a positive integer. You can't set it simultaneously with days. + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +type DefaultRetentionParameters struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies a type of object lock. One of ["GOVERNANCE", "COMPLIANCE"]. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // Specifies a retention period in years after uploading an object version. It must be a positive integer. You can't set it simultaneously with days. + // +kubebuilder:validation:Optional + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +type ExpirationInitParameters struct { + + // Specifies the date after which you want the corresponding action to take effect. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Object Storage to delete expired object delete markers. + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type ExpirationObservation struct { + + // Specifies the date after which you want the corresponding action to take effect. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. 
+ Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Object Storage to delete expired object delete markers. + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type ExpirationParameters struct { + + // Specifies the date after which you want the corresponding action to take effect. + // +kubebuilder:validation:Optional + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Object Storage to delete expired object delete markers. + // +kubebuilder:validation:Optional + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type FilterInitParameters struct { + + // operator applied to one or more filter parameters. It should be used when two or more of the above parameters are used. It supports the following parameters: + And []AndInitParameters `json:"and,omitempty" tf:"and,omitempty"` + + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + Tag []TagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterObservation struct { + + // operator applied to one or more filter parameters. It should be used when two or more of the above parameters are used. It supports the following parameters: + And []AndObservation `json:"and,omitempty" tf:"and,omitempty"` + + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + Tag []TagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterParameters struct { + + // operator applied to one or more filter parameters. It should be used when two or more of the above parameters are used. It supports the following parameters: + // +kubebuilder:validation:Optional + And []AndParameters `json:"and,omitempty" tf:"and,omitempty"` + + // +kubebuilder:validation:Optional + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // +kubebuilder:validation:Optional + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // +kubebuilder:validation:Optional + Tag []TagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type GrantInitParameters struct { + + // Canonical user ID to grant for. Used only when type is CanonicalUser. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of permissions to apply for grantee. Valid values are READ, WRITE, FULL_CONTROL. + // +listType=set + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // Type of grantee to apply for. Valid values are CanonicalUser and Group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // URI address to grant for. Used only when type is Group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type GrantObservation struct { + + // Canonical user ID to grant for. Used only when type is CanonicalUser. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of permissions to apply for grantee. Valid values are READ, WRITE, FULL_CONTROL. + // +listType=set + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // Type of grantee to apply for. Valid values are CanonicalUser and Group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // URI address to grant for. Used only when type is Group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type GrantParameters struct { + + // Canonical user ID to grant for. Used only when type is CanonicalUser. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of permissions to apply for grantee. Valid values are READ, WRITE, FULL_CONTROL. + // +kubebuilder:validation:Optional + // +listType=set + Permissions []*string `json:"permissions" tf:"permissions,omitempty"` + + // Type of grantee to apply for. Valid values are CanonicalUser and Group. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // URI address to grant for. Used only when type is Group. + // +kubebuilder:validation:Optional + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type HTTPSInitParameters struct { + + // ID of the certificate in Certificate Manager that will be used for the bucket. + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` +} + +type HTTPSObservation struct { + + // ID of the certificate in Certificate Manager that will be used for the bucket. + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` +} + +type HTTPSParameters struct { + + // ID of the certificate in Certificate Manager that will be used for the bucket. + // +kubebuilder:validation:Optional + CertificateID *string `json:"certificateId" tf:"certificate_id,omitempty"` +} + +type LifecycleRuleInitParameters struct { + + // Specifies the number of days after initiating a multipart upload when the multipart upload must be completed. + AbortIncompleteMultipartUploadDays *float64 `json:"abortIncompleteMultipartUploadDays,omitempty" tf:"abort_incomplete_multipart_upload_days,omitempty"` + + // Specifies lifecycle rule status. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies when objects expire (documented below).
+ Expiration []ExpirationInitParameters `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Filter block identifies one or more objects to which the rule applies. A Filter must have exactly one of Prefix, Tag, or And specified. The filter supports the following options: + Filter []FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies when noncurrent object versions expire (documented below). + NoncurrentVersionExpiration []NoncurrentVersionExpirationInitParameters `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Specifies when noncurrent object versions transition (documented below). + NoncurrentVersionTransition []NoncurrentVersionTransitionInitParameters `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Specifies when objects transition (documented below). + Transition []TransitionInitParameters `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type LifecycleRuleObservation struct { + + // Specifies the number of days after initiating a multipart upload when the multipart upload must be completed. + AbortIncompleteMultipartUploadDays *float64 `json:"abortIncompleteMultipartUploadDays,omitempty" tf:"abort_incomplete_multipart_upload_days,omitempty"` + + // Specifies lifecycle rule status. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies when objects expire (documented below). + Expiration []ExpirationObservation `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Filter block identifies one or more objects to which the rule applies. A Filter must have exactly one of Prefix, Tag, or And specified. The filter supports the following options: + Filter []FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies when noncurrent object versions expire (documented below). + NoncurrentVersionExpiration []NoncurrentVersionExpirationObservation `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Specifies when noncurrent object versions transition (documented below). + NoncurrentVersionTransition []NoncurrentVersionTransitionObservation `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Specifies when objects transition (documented below). + Transition []TransitionObservation `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type LifecycleRuleParameters struct { + + // Specifies the number of days after initiating a multipart upload when the multipart upload must be completed.
+ // +kubebuilder:validation:Optional + AbortIncompleteMultipartUploadDays *float64 `json:"abortIncompleteMultipartUploadDays,omitempty" tf:"abort_incomplete_multipart_upload_days,omitempty"` + + // Specifies lifecycle rule status. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Specifies when objects expire (documented below). + // +kubebuilder:validation:Optional + Expiration []ExpirationParameters `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Filter block identifies one or more objects to which the rule applies. A Filter must have exactly one of Prefix, Tag, or And specified. The filter supports the following options: + // +kubebuilder:validation:Optional + Filter []FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies when noncurrent object versions expire (documented below). + // +kubebuilder:validation:Optional + NoncurrentVersionExpiration []NoncurrentVersionExpirationParameters `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Specifies when noncurrent object versions transition (documented below). + // +kubebuilder:validation:Optional + NoncurrentVersionTransition []NoncurrentVersionTransitionParameters `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Specifies when objects transition (documented below). + // +kubebuilder:validation:Optional + Transition []TransitionParameters `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type LoggingInitParameters struct { + + // The name of the bucket that will receive the log objects. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // Reference to a Bucket in storage to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketRef *v1.Reference `json:"targetBucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketSelector *v1.Selector `json:"targetBucketSelector,omitempty" tf:"-"` + + // Specifies a key prefix for log objects. + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type LoggingObservation struct { + + // The name of the bucket that will receive the log objects. + TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // Specifies a key prefix for log objects. + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type LoggingParameters struct { + + // The name of the bucket that will receive the log objects.
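+ // For instance (hand-written sketch, not generated output), current versions can be expired 30 days after upload: + // + //  thirty := float64(30) + //  exp := ExpirationInitParameters{Days: &thirty} + // + // Date and Days are alternatives here; a single expiration block normally sets only one of them. + //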
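+ // Putting the pieces together (illustrative hand-written sketch, not generated output), a rule that aborts stale multipart uploads after a week and expires objects under "tmp/" after 30 days: + // + //  seven, thirty, on, tmp := float64(7), float64(30), true, "tmp/" + //  rule := LifecycleRuleParameters{ + //  	AbortIncompleteMultipartUploadDays: &seven, + //  	Enabled:                            &on, + //  	Filter:                             []FilterParameters{{Prefix: &tmp}}, + //  	Expiration:                         []ExpirationParameters{{Days: &thirty}}, + //  } + //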
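+ // The name can be set literally, or resolved at runtime: when TargetBucketRef or TargetBucketSelector below is supplied, the crossplane runtime looks up the referenced Bucket and fills in target_bucket before the value reaches Terraform. Hand-written sketch (the object name is hypothetical): + // + //  logging := LoggingParameters{TargetBucketRef: &v1.Reference{Name: "audit-logs"}} + //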
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // Reference to a Bucket in storage to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketRef *v1.Reference `json:"targetBucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketSelector *v1.Selector `json:"targetBucketSelector,omitempty" tf:"-"` + + // Specifies a key prefix for log objects. + // +kubebuilder:validation:Optional + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type NoncurrentVersionExpirationInitParameters struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type NoncurrentVersionExpirationObservation struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type NoncurrentVersionExpirationParameters struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type NoncurrentVersionTransitionInitParameters struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the storage class to which you want the object to transition. Supported values: [STANDARD_IA, COLD, ICE]. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type NoncurrentVersionTransitionObservation struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the storage class to which you want the object to transition. Supported values: [STANDARD_IA, COLD, ICE]. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type NoncurrentVersionTransitionParameters struct { + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the storage class to which you want the object to transition. Supported values: [STANDARD_IA, COLD, ICE]. + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass" tf:"storage_class,omitempty"` +} + +type ObjectLockConfigurationInitParameters struct { + + // Enable object locking in a bucket. Requires versioning to be enabled. + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Specifies a default locking configuration for added objects.
Requires object_lock_enabled to be enabled. + Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ObjectLockConfigurationObservation struct { + + // Enable object locking in a bucket. Requires versioning to be enabled. + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Specifies a default locking configuration for added objects. Requires object_lock_enabled to be enabled. + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ObjectLockConfigurationParameters struct { + + // Enable object locking in a bucket. Requires versioning to be enabled. + // +kubebuilder:validation:Optional + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Specifies a default locking configuration for added objects. Requires object_lock_enabled to be enabled. + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type RuleInitParameters struct { + DefaultRetention []DefaultRetentionInitParameters `json:"defaultRetention,omitempty" tf:"default_retention,omitempty"` +} + +type RuleObservation struct { + DefaultRetention []DefaultRetentionObservation `json:"defaultRetention,omitempty" tf:"default_retention,omitempty"` +} + +type RuleParameters struct { + + // +kubebuilder:validation:Optional + DefaultRetention []DefaultRetentionParameters `json:"defaultRetention" tf:"default_retention,omitempty"` +} + +type ServerSideEncryptionConfigurationInitParameters struct { + + // Specifies a rule for server-side encryption by default (documented below). + Rule []ServerSideEncryptionConfigurationRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ServerSideEncryptionConfigurationObservation struct { + + // Specifies a rule for server-side encryption by default (documented below). + Rule []ServerSideEncryptionConfigurationRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ServerSideEncryptionConfigurationParameters struct { + + // Specifies a rule for server-side encryption by default (documented below). + // +kubebuilder:validation:Optional + Rule []ServerSideEncryptionConfigurationRuleParameters `json:"rule" tf:"rule,omitempty"` +} + +type ServerSideEncryptionConfigurationRuleInitParameters struct { + + // A single object for setting server-side encryption by default. (documented below) + ApplyServerSideEncryptionByDefault []ApplyServerSideEncryptionByDefaultInitParameters `json:"applyServerSideEncryptionByDefault,omitempty" tf:"apply_server_side_encryption_by_default,omitempty"` +} + +type ServerSideEncryptionConfigurationRuleObservation struct { + + // A single object for setting server-side encryption by default. (documented below) + ApplyServerSideEncryptionByDefault []ApplyServerSideEncryptionByDefaultObservation `json:"applyServerSideEncryptionByDefault,omitempty" tf:"apply_server_side_encryption_by_default,omitempty"` +} + +type ServerSideEncryptionConfigurationRuleParameters struct { + + // A single object for setting server-side encryption by default.
(documented below) + // +kubebuilder:validation:Optional + ApplyServerSideEncryptionByDefault []ApplyServerSideEncryptionByDefaultParameters `json:"applyServerSideEncryptionByDefault" tf:"apply_server_side_encryption_by_default,omitempty"` +} + +type TagInitParameters struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagObservation struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagParameters struct { + + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TransitionInitParameters struct { + + // Specifies the date after which you want the corresponding action to take effect. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the storage class to which you want the object to transition. Supported values: [STANDARD_IA, COLD, ICE]. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type TransitionObservation struct { + + // Specifies the date after which you want the corresponding action to take effect. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the storage class to which you want the object to transition. Supported values: [STANDARD_IA, COLD, ICE]. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type TransitionParameters struct { + + // Specifies the date after which you want the corresponding action to take effect. + // +kubebuilder:validation:Optional + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies a retention period in days after uploading an object version. It must be a positive integer. You can't set it simultaneously with years. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the storage class to which you want the object to transition. Supported values: [STANDARD_IA, COLD, ICE]. + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass" tf:"storage_class,omitempty"` +} + +type VersioningInitParameters struct { + + // Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type VersioningObservation struct { + + // Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type VersioningParameters struct { + + // Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. 
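+ // Versioning is also a prerequisite for object_lock_configuration above, so a bucket that uses object locking first needs (hand-written sketch): + // + //  on := true + //  versioning := []VersioningParameters{{Enabled: &on}} + //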
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type WebsiteInitParameters struct { + + // An absolute path to the document to return in case of a 4XX error. + ErrorDocument *string `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Storage returns this index document when requests are made to the root domain or any of the subfolders. + IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // A hostname to redirect all website requests for this bucket to. Hostname can optionally be prefixed with a protocol (http:// or https://) to use when redirecting requests. The default is the protocol that is used in the original request. + RedirectAllRequestsTo *string `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // A JSON array containing routing rules describing redirect behavior and when redirects are applied. + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` +} + +type WebsiteObservation struct { + + // An absolute path to the document to return in case of a 4XX error. + ErrorDocument *string `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Storage returns this index document when requests are made to the root domain or any of the subfolders. + IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // A hostname to redirect all website requests for this bucket to. Hostname can optionally be prefixed with a protocol (http:// or https://) to use when redirecting requests. The default is the protocol that is used in the original request. + RedirectAllRequestsTo *string `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // A JSON array containing routing rules describing redirect behavior and when redirects are applied. + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` +} + +type WebsiteParameters struct { + + // An absolute path to the document to return in case of a 4XX error. + // +kubebuilder:validation:Optional + ErrorDocument *string `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Storage returns this index document when requests are made to the root domain or any of the subfolders. + // +kubebuilder:validation:Optional + IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // A hostname to redirect all website requests for this bucket to. Hostname can optionally be prefixed with a protocol (http:// or https://) to use when redirecting requests. The default is the protocol that is used in the original request. + // +kubebuilder:validation:Optional + RedirectAllRequestsTo *string `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // A JSON array containing routing rules describing redirect behavior and when redirects are applied. + // +kubebuilder:validation:Optional + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` +} + +// BucketSpec defines the desired state of Bucket +type BucketSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields.
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider BucketInitParameters `json:"initProvider,omitempty"` +} + +// BucketStatus defines the observed state of Bucket. +type BucketStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Bucket is the Schema for the Buckets API. Allows management of a Yandex.Cloud Storage Bucket. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec BucketSpec `json:"spec"` + Status BucketStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketList contains a list of Buckets +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +// Repository type metadata. +var ( + Bucket_Kind = "Bucket" + Bucket_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Bucket_Kind}.String() + Bucket_KindAPIVersion = Bucket_Kind + "." + CRDGroupVersion.String() + Bucket_GroupVersionKind = CRDGroupVersion.WithKind(Bucket_Kind) +) + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/apis/storage/v1alpha1/zz_generated.conversion_hubs.go b/apis/storage/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..b6cf3aa --- /dev/null +++ b/apis/storage/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,9 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Bucket) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Object) Hub() {} diff --git a/apis/storage/v1alpha1/zz_generated.deepcopy.go b/apis/storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..6c51663 --- /dev/null +++ b/apis/storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3113 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
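+// Every DeepCopyInto below repeats the same pointer-field idiom: allocate a fresh value, then copy through the pointers, for example (pattern excerpt only, not an additional generated function): +// +//  if in.Prefix != nil { +//  	in, out := &in.Prefix, &out.Prefix +//  	*out = new(string) +//  	**out = **in +//  } +// +// so a copy never aliases memory owned by its receiver. +//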
+func (in *AndInitParameters) DeepCopyInto(out *AndInitParameters) { + *out = *in + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(float64) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndInitParameters. +func (in *AndInitParameters) DeepCopy() *AndInitParameters { + if in == nil { + return nil + } + out := new(AndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndObservation) DeepCopyInto(out *AndObservation) { + *out = *in + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(float64) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndObservation. +func (in *AndObservation) DeepCopy() *AndObservation { + if in == nil { + return nil + } + out := new(AndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndParameters) DeepCopyInto(out *AndParameters) { + *out = *in + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(float64) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndParameters. +func (in *AndParameters) DeepCopy() *AndParameters { + if in == nil { + return nil + } + out := new(AndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnonymousAccessFlagsInitParameters) DeepCopyInto(out *AnonymousAccessFlagsInitParameters) { + *out = *in + if in.ConfigRead != nil { + in, out := &in.ConfigRead, &out.ConfigRead + *out = new(bool) + **out = **in + } + if in.List != nil { + in, out := &in.List, &out.List + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAccessFlagsInitParameters. +func (in *AnonymousAccessFlagsInitParameters) DeepCopy() *AnonymousAccessFlagsInitParameters { + if in == nil { + return nil + } + out := new(AnonymousAccessFlagsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAccessFlagsObservation) DeepCopyInto(out *AnonymousAccessFlagsObservation) { + *out = *in + if in.ConfigRead != nil { + in, out := &in.ConfigRead, &out.ConfigRead + *out = new(bool) + **out = **in + } + if in.List != nil { + in, out := &in.List, &out.List + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAccessFlagsObservation. +func (in *AnonymousAccessFlagsObservation) DeepCopy() *AnonymousAccessFlagsObservation { + if in == nil { + return nil + } + out := new(AnonymousAccessFlagsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAccessFlagsParameters) DeepCopyInto(out *AnonymousAccessFlagsParameters) { + *out = *in + if in.ConfigRead != nil { + in, out := &in.ConfigRead, &out.ConfigRead + *out = new(bool) + **out = **in + } + if in.List != nil { + in, out := &in.List, &out.List + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAccessFlagsParameters. +func (in *AnonymousAccessFlagsParameters) DeepCopy() *AnonymousAccessFlagsParameters { + if in == nil { + return nil + } + out := new(AnonymousAccessFlagsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyServerSideEncryptionByDefaultInitParameters) DeepCopyInto(out *ApplyServerSideEncryptionByDefaultInitParameters) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.KMSMasterKeyIDRef != nil { + in, out := &in.KMSMasterKeyIDRef, &out.KMSMasterKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSMasterKeyIDSelector != nil { + in, out := &in.KMSMasterKeyIDSelector, &out.KMSMasterKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyServerSideEncryptionByDefaultInitParameters. 
+func (in *ApplyServerSideEncryptionByDefaultInitParameters) DeepCopy() *ApplyServerSideEncryptionByDefaultInitParameters { + if in == nil { + return nil + } + out := new(ApplyServerSideEncryptionByDefaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyServerSideEncryptionByDefaultObservation) DeepCopyInto(out *ApplyServerSideEncryptionByDefaultObservation) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyServerSideEncryptionByDefaultObservation. +func (in *ApplyServerSideEncryptionByDefaultObservation) DeepCopy() *ApplyServerSideEncryptionByDefaultObservation { + if in == nil { + return nil + } + out := new(ApplyServerSideEncryptionByDefaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyServerSideEncryptionByDefaultParameters) DeepCopyInto(out *ApplyServerSideEncryptionByDefaultParameters) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.KMSMasterKeyIDRef != nil { + in, out := &in.KMSMasterKeyIDRef, &out.KMSMasterKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSMasterKeyIDSelector != nil { + in, out := &in.KMSMasterKeyIDSelector, &out.KMSMasterKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyServerSideEncryptionByDefaultParameters. +func (in *ApplyServerSideEncryptionByDefaultParameters) DeepCopy() *ApplyServerSideEncryptionByDefaultParameters { + if in == nil { + return nil + } + out := new(ApplyServerSideEncryptionByDefaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketInitParameters) DeepCopyInto(out *BucketInitParameters) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.AccessKeyRef != nil { + in, out := &in.AccessKeyRef, &out.AccessKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessKeySelector != nil { + in, out := &in.AccessKeySelector, &out.AccessKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AnonymousAccessFlags != nil { + in, out := &in.AnonymousAccessFlags, &out.AnonymousAccessFlags + *out = make([]AnonymousAccessFlagsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultStorageClass != nil { + in, out := &in.DefaultStorageClass, &out.DefaultStorageClass + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]GrantInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPS != nil { + in, out := &in.HTTPS, &out.HTTPS + *out = make([]HTTPSInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LifecycleRule != nil { + in, out := &in.LifecycleRule, &out.LifecycleRule + *out = make([]LifecycleRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.ObjectLockConfiguration != nil { + in, out := &in.ObjectLockConfiguration, &out.ObjectLockConfiguration + *out = make([]ObjectLockConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.SecretKeySecretRef != nil { + in, out := &in.SecretKeySecretRef, &out.SecretKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = make([]ServerSideEncryptionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Versioning != nil { + in, out := &in.Versioning, &out.Versioning + *out = make([]VersioningInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Website != nil { + in, out := &in.Website, &out.Website + *out = make([]WebsiteInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebsiteDomain != nil { + in, out := &in.WebsiteDomain, &out.WebsiteDomain + *out = new(string) + **out = **in + } + if in.WebsiteEndpoint != nil { + in, out := &in.WebsiteEndpoint, &out.WebsiteEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInitParameters. +func (in *BucketInitParameters) DeepCopy() *BucketInitParameters { + if in == nil { + return nil + } + out := new(BucketInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
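+// Map-valued fields such as Tags, copied just above, use a second idiom: a new map is allocated and every *string value is re-pointed entry by entry ((*out)[key] = outVal, where outVal is a freshly allocated string), so mutating a tag on the copy never reaches the original. +//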
+func (in *BucketObservation) DeepCopyInto(out *BucketObservation) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.AnonymousAccessFlags != nil { + in, out := &in.AnonymousAccessFlags, &out.AnonymousAccessFlags + *out = make([]AnonymousAccessFlagsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketDomainName != nil { + in, out := &in.BucketDomainName, &out.BucketDomainName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultStorageClass != nil { + in, out := &in.DefaultStorageClass, &out.DefaultStorageClass + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]GrantObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPS != nil { + in, out := &in.HTTPS, &out.HTTPS + *out = make([]HTTPSObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LifecycleRule != nil { + in, out := &in.LifecycleRule, &out.LifecycleRule + *out = make([]LifecycleRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.ObjectLockConfiguration != nil { + in, out := &in.ObjectLockConfiguration, &out.ObjectLockConfiguration + *out = make([]ObjectLockConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = make([]ServerSideEncryptionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Versioning != nil { + in, out := &in.Versioning, &out.Versioning + *out = make([]VersioningObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Website != nil { + in, out := &in.Website, &out.Website + *out = make([]WebsiteObservation, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebsiteDomain != nil { + in, out := &in.WebsiteDomain, &out.WebsiteDomain + *out = new(string) + **out = **in + } + if in.WebsiteEndpoint != nil { + in, out := &in.WebsiteEndpoint, &out.WebsiteEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObservation. +func (in *BucketObservation) DeepCopy() *BucketObservation { + if in == nil { + return nil + } + out := new(BucketObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketParameters) DeepCopyInto(out *BucketParameters) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.AccessKeyRef != nil { + in, out := &in.AccessKeyRef, &out.AccessKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessKeySelector != nil { + in, out := &in.AccessKeySelector, &out.AccessKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AnonymousAccessFlags != nil { + in, out := &in.AnonymousAccessFlags, &out.AnonymousAccessFlags + *out = make([]AnonymousAccessFlagsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultStorageClass != nil { + in, out := &in.DefaultStorageClass, &out.DefaultStorageClass + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]GrantParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPS != nil { + in, out := &in.HTTPS, &out.HTTPS + *out = make([]HTTPSParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LifecycleRule != nil { + in, out := &in.LifecycleRule, &out.LifecycleRule + *out = make([]LifecycleRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.ObjectLockConfiguration != nil { + in, out := &in.ObjectLockConfiguration, &out.ObjectLockConfiguration + *out = make([]ObjectLockConfigurationParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.SecretKeySecretRef != nil { + in, out := &in.SecretKeySecretRef, &out.SecretKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = make([]ServerSideEncryptionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Versioning != nil { + in, out := &in.Versioning, &out.Versioning + *out = make([]VersioningParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Website != nil { + in, out := &in.Website, &out.Website + *out = make([]WebsiteParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebsiteDomain != nil { + in, out := &in.WebsiteDomain, &out.WebsiteDomain + *out = new(string) + **out = **in + } + if in.WebsiteEndpoint != nil { + in, out := &in.WebsiteEndpoint, &out.WebsiteEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketParameters. +func (in *BucketParameters) DeepCopy() *BucketParameters { + if in == nil { + return nil + } + out := new(BucketParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
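+// Slice-of-pointer fields (AllowedHeaders, AllowedMethods, and the other CORS lists below) follow a third recurring idiom: make a new slice, then give every non-nil element its own freshly allocated string: +// +//  if (*in)[i] != nil { +//  	in, out := &(*in)[i], &(*out)[i] +//  	*out = new(string) +//  	**out = **in +//  } +//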
+func (in *CorsRuleInitParameters) DeepCopyInto(out *CorsRuleInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleInitParameters. +func (in *CorsRuleInitParameters) DeepCopy() *CorsRuleInitParameters { + if in == nil { + return nil + } + out := new(CorsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsRuleObservation) DeepCopyInto(out *CorsRuleObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleObservation. +func (in *CorsRuleObservation) DeepCopy() *CorsRuleObservation { + if in == nil { + return nil + } + out := new(CorsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsRuleParameters) DeepCopyInto(out *CorsRuleParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleParameters. +func (in *CorsRuleParameters) DeepCopy() *CorsRuleParameters { + if in == nil { + return nil + } + out := new(CorsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRetentionInitParameters) DeepCopyInto(out *DefaultRetentionInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetentionInitParameters. +func (in *DefaultRetentionInitParameters) DeepCopy() *DefaultRetentionInitParameters { + if in == nil { + return nil + } + out := new(DefaultRetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRetentionObservation) DeepCopyInto(out *DefaultRetentionObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetentionObservation. +func (in *DefaultRetentionObservation) DeepCopy() *DefaultRetentionObservation { + if in == nil { + return nil + } + out := new(DefaultRetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultRetentionParameters) DeepCopyInto(out *DefaultRetentionParameters) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+	if in.Years != nil {
+		in, out := &in.Years, &out.Years
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetentionParameters.
+func (in *DefaultRetentionParameters) DeepCopy() *DefaultRetentionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(DefaultRetentionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpirationInitParameters) DeepCopyInto(out *ExpirationInitParameters) {
+	*out = *in
+	if in.Date != nil {
+		in, out := &in.Date, &out.Date
+		*out = new(string)
+		**out = **in
+	}
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExpiredObjectDeleteMarker != nil {
+		in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirationInitParameters.
+func (in *ExpirationInitParameters) DeepCopy() *ExpirationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ExpirationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpirationObservation) DeepCopyInto(out *ExpirationObservation) {
+	*out = *in
+	if in.Date != nil {
+		in, out := &in.Date, &out.Date
+		*out = new(string)
+		**out = **in
+	}
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExpiredObjectDeleteMarker != nil {
+		in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirationObservation.
+func (in *ExpirationObservation) DeepCopy() *ExpirationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ExpirationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpirationParameters) DeepCopyInto(out *ExpirationParameters) {
+	*out = *in
+	if in.Date != nil {
+		in, out := &in.Date, &out.Date
+		*out = new(string)
+		**out = **in
+	}
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ExpiredObjectDeleteMarker != nil {
+		in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirationParameters.
+func (in *ExpirationParameters) DeepCopy() *ExpirationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ExpirationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
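
A quick illustration of what these generated copies guarantee: every pointer field is re-allocated, so a copy never aliases the original. This is a minimal sketch, not part of the generated file; it assumes it compiles inside this v1alpha1 package with "fmt" imported.

// Illustrative only: shows the aliasing guarantee of the generated DeepCopy.
func exampleDeepCopy() {
	days := 30.0
	orig := &ExpirationParameters{Days: &days}
	cp := orig.DeepCopy()   // re-allocates every pointer field
	*cp.Days = 60           // mutate the copy...
	fmt.Println(*orig.Days) // ...the original still prints 30
}

This independence is what lets controllers hand cached objects to reconcilers without defensive locking.
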
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) {
+	*out = *in
+	if in.And != nil {
+		in, out := &in.And, &out.And
+		*out = make([]AndInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ObjectSizeGreaterThan != nil {
+		in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ObjectSizeLessThan != nil {
+		in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tag != nil {
+		in, out := &in.Tag, &out.Tag
+		*out = make([]TagInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters.
+func (in *FilterInitParameters) DeepCopy() *FilterInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FilterInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterObservation) DeepCopyInto(out *FilterObservation) {
+	*out = *in
+	if in.And != nil {
+		in, out := &in.And, &out.And
+		*out = make([]AndObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ObjectSizeGreaterThan != nil {
+		in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ObjectSizeLessThan != nil {
+		in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tag != nil {
+		in, out := &in.Tag, &out.Tag
+		*out = make([]TagObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation.
+func (in *FilterObservation) DeepCopy() *FilterObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(FilterObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterParameters) DeepCopyInto(out *FilterParameters) {
+	*out = *in
+	if in.And != nil {
+		in, out := &in.And, &out.And
+		*out = make([]AndParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ObjectSizeGreaterThan != nil {
+		in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ObjectSizeLessThan != nil {
+		in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tag != nil {
+		in, out := &in.Tag, &out.Tag
+		*out = make([]TagParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters.
+func (in *FilterParameters) DeepCopy() *FilterParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(FilterParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GrantInitParameters) DeepCopyInto(out *GrantInitParameters) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permissions != nil {
+		in, out := &in.Permissions, &out.Permissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantInitParameters.
+func (in *GrantInitParameters) DeepCopy() *GrantInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GrantInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GrantObservation) DeepCopyInto(out *GrantObservation) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permissions != nil {
+		in, out := &in.Permissions, &out.Permissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantObservation.
+func (in *GrantObservation) DeepCopy() *GrantObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GrantObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GrantParameters) DeepCopyInto(out *GrantParameters) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Permissions != nil {
+		in, out := &in.Permissions, &out.Permissions
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(string)
+		**out = **in
+	}
+	if in.URI != nil {
+		in, out := &in.URI, &out.URI
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantParameters.
+func (in *GrantParameters) DeepCopy() *GrantParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GrantParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPSInitParameters) DeepCopyInto(out *HTTPSInitParameters) {
+	*out = *in
+	if in.CertificateID != nil {
+		in, out := &in.CertificateID, &out.CertificateID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSInitParameters.
+func (in *HTTPSInitParameters) DeepCopy() *HTTPSInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPSInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPSObservation) DeepCopyInto(out *HTTPSObservation) {
+	*out = *in
+	if in.CertificateID != nil {
+		in, out := &in.CertificateID, &out.CertificateID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSObservation.
+func (in *HTTPSObservation) DeepCopy() *HTTPSObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPSObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPSParameters) DeepCopyInto(out *HTTPSParameters) {
+	*out = *in
+	if in.CertificateID != nil {
+		in, out := &in.CertificateID, &out.CertificateID
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSParameters.
+func (in *HTTPSParameters) DeepCopy() *HTTPSParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPSParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleRuleInitParameters) DeepCopyInto(out *LifecycleRuleInitParameters) {
+	*out = *in
+	if in.AbortIncompleteMultipartUploadDays != nil {
+		in, out := &in.AbortIncompleteMultipartUploadDays, &out.AbortIncompleteMultipartUploadDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Expiration != nil {
+		in, out := &in.Expiration, &out.Expiration
+		*out = make([]ExpirationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = make([]FilterInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NoncurrentVersionExpiration != nil {
+		in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration
+		*out = make([]NoncurrentVersionExpirationInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NoncurrentVersionTransition != nil {
+		in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition
+		*out = make([]NoncurrentVersionTransitionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Transition != nil {
+		in, out := &in.Transition, &out.Transition
+		*out = make([]TransitionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleRuleInitParameters.
+func (in *LifecycleRuleInitParameters) DeepCopy() *LifecycleRuleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleRuleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleRuleObservation) DeepCopyInto(out *LifecycleRuleObservation) {
+	*out = *in
+	if in.AbortIncompleteMultipartUploadDays != nil {
+		in, out := &in.AbortIncompleteMultipartUploadDays, &out.AbortIncompleteMultipartUploadDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Expiration != nil {
+		in, out := &in.Expiration, &out.Expiration
+		*out = make([]ExpirationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = make([]FilterObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NoncurrentVersionExpiration != nil {
+		in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration
+		*out = make([]NoncurrentVersionExpirationObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NoncurrentVersionTransition != nil {
+		in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition
+		*out = make([]NoncurrentVersionTransitionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Transition != nil {
+		in, out := &in.Transition, &out.Transition
+		*out = make([]TransitionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleRuleObservation.
+func (in *LifecycleRuleObservation) DeepCopy() *LifecycleRuleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleRuleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleRuleParameters) DeepCopyInto(out *LifecycleRuleParameters) {
+	*out = *in
+	if in.AbortIncompleteMultipartUploadDays != nil {
+		in, out := &in.AbortIncompleteMultipartUploadDays, &out.AbortIncompleteMultipartUploadDays
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Expiration != nil {
+		in, out := &in.Expiration, &out.Expiration
+		*out = make([]ExpirationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = make([]FilterParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NoncurrentVersionExpiration != nil {
+		in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration
+		*out = make([]NoncurrentVersionExpirationParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NoncurrentVersionTransition != nil {
+		in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition
+		*out = make([]NoncurrentVersionTransitionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.Transition != nil {
+		in, out := &in.Transition, &out.Transition
+		*out = make([]TransitionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleRuleParameters.
+func (in *LifecycleRuleParameters) DeepCopy() *LifecycleRuleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleRuleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) {
+	*out = *in
+	if in.TargetBucket != nil {
+		in, out := &in.TargetBucket, &out.TargetBucket
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetBucketRef != nil {
+		in, out := &in.TargetBucketRef, &out.TargetBucketRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetBucketSelector != nil {
+		in, out := &in.TargetBucketSelector, &out.TargetBucketSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetPrefix != nil {
+		in, out := &in.TargetPrefix, &out.TargetPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters.
+func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoggingInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) {
+	*out = *in
+	if in.TargetBucket != nil {
+		in, out := &in.TargetBucket, &out.TargetBucket
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetPrefix != nil {
+		in, out := &in.TargetPrefix, &out.TargetPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation.
+func (in *LoggingObservation) DeepCopy() *LoggingObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(LoggingObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) {
+	*out = *in
+	if in.TargetBucket != nil {
+		in, out := &in.TargetBucket, &out.TargetBucket
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetBucketRef != nil {
+		in, out := &in.TargetBucketRef, &out.TargetBucketRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetBucketSelector != nil {
+		in, out := &in.TargetBucketSelector, &out.TargetBucketSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetPrefix != nil {
+		in, out := &in.TargetPrefix, &out.TargetPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters.
+func (in *LoggingParameters) DeepCopy() *LoggingParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(LoggingParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoncurrentVersionExpirationInitParameters) DeepCopyInto(out *NoncurrentVersionExpirationInitParameters) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionExpirationInitParameters.
+func (in *NoncurrentVersionExpirationInitParameters) DeepCopy() *NoncurrentVersionExpirationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NoncurrentVersionExpirationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoncurrentVersionExpirationObservation) DeepCopyInto(out *NoncurrentVersionExpirationObservation) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionExpirationObservation.
+func (in *NoncurrentVersionExpirationObservation) DeepCopy() *NoncurrentVersionExpirationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(NoncurrentVersionExpirationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoncurrentVersionExpirationParameters) DeepCopyInto(out *NoncurrentVersionExpirationParameters) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionExpirationParameters.
+func (in *NoncurrentVersionExpirationParameters) DeepCopy() *NoncurrentVersionExpirationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NoncurrentVersionExpirationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoncurrentVersionTransitionInitParameters) DeepCopyInto(out *NoncurrentVersionTransitionInitParameters) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionTransitionInitParameters.
+func (in *NoncurrentVersionTransitionInitParameters) DeepCopy() *NoncurrentVersionTransitionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NoncurrentVersionTransitionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoncurrentVersionTransitionObservation) DeepCopyInto(out *NoncurrentVersionTransitionObservation) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionTransitionObservation.
+func (in *NoncurrentVersionTransitionObservation) DeepCopy() *NoncurrentVersionTransitionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(NoncurrentVersionTransitionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NoncurrentVersionTransitionParameters) DeepCopyInto(out *NoncurrentVersionTransitionParameters) {
+	*out = *in
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionTransitionParameters.
+func (in *NoncurrentVersionTransitionParameters) DeepCopy() *NoncurrentVersionTransitionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(NoncurrentVersionTransitionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Object) DeepCopyInto(out *Object) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Object.
+func (in *Object) DeepCopy() *Object {
+	if in == nil {
+		return nil
+	}
+	out := new(Object)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Object) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
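
DeepCopyObject is the method that lets the managed resource travel through generic Kubernetes machinery (informers, caches, the scheme) as a runtime.Object without the caller knowing the concrete type. A minimal sketch, assuming "k8s.io/apimachinery/pkg/runtime" is imported alongside this package's types:

// Illustrative only: cloning through the interface preserves the concrete type.
func cloneViaInterface(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject() // for *Object this returns an independent *Object
}

// Compile-time check that the generated type satisfies the interface.
var _ runtime.Object = &Object{}
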
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectInitParameters) DeepCopyInto(out *ObjectInitParameters) {
+	*out = *in
+	if in.ACL != nil {
+		in, out := &in.ACL, &out.ACL
+		*out = new(string)
+		**out = **in
+	}
+	if in.AccessKey != nil {
+		in, out := &in.AccessKey, &out.AccessKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.AccessKeyRef != nil {
+		in, out := &in.AccessKeyRef, &out.AccessKeyRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AccessKeySelector != nil {
+		in, out := &in.AccessKeySelector, &out.AccessKeySelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Bucket != nil {
+		in, out := &in.Bucket, &out.Bucket
+		*out = new(string)
+		**out = **in
+	}
+	if in.BucketRef != nil {
+		in, out := &in.BucketRef, &out.BucketRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.BucketSelector != nil {
+		in, out := &in.BucketSelector, &out.BucketSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Content != nil {
+		in, out := &in.Content, &out.Content
+		*out = new(string)
+		**out = **in
+	}
+	if in.ContentBase64 != nil {
+		in, out := &in.ContentBase64, &out.ContentBase64
+		*out = new(string)
+		**out = **in
+	}
+	if in.ContentType != nil {
+		in, out := &in.ContentType, &out.ContentType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockLegalHoldStatus != nil {
+		in, out := &in.ObjectLockLegalHoldStatus, &out.ObjectLockLegalHoldStatus
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockMode != nil {
+		in, out := &in.ObjectLockMode, &out.ObjectLockMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockRetainUntilDate != nil {
+		in, out := &in.ObjectLockRetainUntilDate, &out.ObjectLockRetainUntilDate
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecretKeySecretRef != nil {
+		in, out := &in.SecretKeySecretRef, &out.SecretKeySecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+	if in.Source != nil {
+		in, out := &in.Source, &out.Source
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceHash != nil {
+		in, out := &in.SourceHash, &out.SourceHash
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectInitParameters.
+func (in *ObjectInitParameters) DeepCopy() *ObjectInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectList) DeepCopyInto(out *ObjectList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Object, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectList.
+func (in *ObjectList) DeepCopy() *ObjectList {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectLockConfigurationInitParameters) DeepCopyInto(out *ObjectLockConfigurationInitParameters) {
+	*out = *in
+	if in.ObjectLockEnabled != nil {
+		in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled
+		*out = new(string)
+		**out = **in
+	}
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = make([]RuleInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfigurationInitParameters.
+func (in *ObjectLockConfigurationInitParameters) DeepCopy() *ObjectLockConfigurationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectLockConfigurationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectLockConfigurationObservation) DeepCopyInto(out *ObjectLockConfigurationObservation) {
+	*out = *in
+	if in.ObjectLockEnabled != nil {
+		in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled
+		*out = new(string)
+		**out = **in
+	}
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = make([]RuleObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfigurationObservation.
+func (in *ObjectLockConfigurationObservation) DeepCopy() *ObjectLockConfigurationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectLockConfigurationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectLockConfigurationParameters) DeepCopyInto(out *ObjectLockConfigurationParameters) {
+	*out = *in
+	if in.ObjectLockEnabled != nil {
+		in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled
+		*out = new(string)
+		**out = **in
+	}
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = make([]RuleParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfigurationParameters.
+func (in *ObjectLockConfigurationParameters) DeepCopy() *ObjectLockConfigurationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectLockConfigurationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectObservation) DeepCopyInto(out *ObjectObservation) {
+	*out = *in
+	if in.ACL != nil {
+		in, out := &in.ACL, &out.ACL
+		*out = new(string)
+		**out = **in
+	}
+	if in.AccessKey != nil {
+		in, out := &in.AccessKey, &out.AccessKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.Bucket != nil {
+		in, out := &in.Bucket, &out.Bucket
+		*out = new(string)
+		**out = **in
+	}
+	if in.Content != nil {
+		in, out := &in.Content, &out.Content
+		*out = new(string)
+		**out = **in
+	}
+	if in.ContentBase64 != nil {
+		in, out := &in.ContentBase64, &out.ContentBase64
+		*out = new(string)
+		**out = **in
+	}
+	if in.ContentType != nil {
+		in, out := &in.ContentType, &out.ContentType
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockLegalHoldStatus != nil {
+		in, out := &in.ObjectLockLegalHoldStatus, &out.ObjectLockLegalHoldStatus
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockMode != nil {
+		in, out := &in.ObjectLockMode, &out.ObjectLockMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockRetainUntilDate != nil {
+		in, out := &in.ObjectLockRetainUntilDate, &out.ObjectLockRetainUntilDate
+		*out = new(string)
+		**out = **in
+	}
+	if in.Source != nil {
+		in, out := &in.Source, &out.Source
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceHash != nil {
+		in, out := &in.SourceHash, &out.SourceHash
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectObservation.
+func (in *ObjectObservation) DeepCopy() *ObjectObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectParameters) DeepCopyInto(out *ObjectParameters) {
+	*out = *in
+	if in.ACL != nil {
+		in, out := &in.ACL, &out.ACL
+		*out = new(string)
+		**out = **in
+	}
+	if in.AccessKey != nil {
+		in, out := &in.AccessKey, &out.AccessKey
+		*out = new(string)
+		**out = **in
+	}
+	if in.AccessKeyRef != nil {
+		in, out := &in.AccessKeyRef, &out.AccessKeyRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AccessKeySelector != nil {
+		in, out := &in.AccessKeySelector, &out.AccessKeySelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Bucket != nil {
+		in, out := &in.Bucket, &out.Bucket
+		*out = new(string)
+		**out = **in
+	}
+	if in.BucketRef != nil {
+		in, out := &in.BucketRef, &out.BucketRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.BucketSelector != nil {
+		in, out := &in.BucketSelector, &out.BucketSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Content != nil {
+		in, out := &in.Content, &out.Content
+		*out = new(string)
+		**out = **in
+	}
+	if in.ContentBase64 != nil {
+		in, out := &in.ContentBase64, &out.ContentBase64
+		*out = new(string)
+		**out = **in
+	}
+	if in.ContentType != nil {
+		in, out := &in.ContentType, &out.ContentType
+		*out = new(string)
+		**out = **in
+	}
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockLegalHoldStatus != nil {
+		in, out := &in.ObjectLockLegalHoldStatus, &out.ObjectLockLegalHoldStatus
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockMode != nil {
+		in, out := &in.ObjectLockMode, &out.ObjectLockMode
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectLockRetainUntilDate != nil {
+		in, out := &in.ObjectLockRetainUntilDate, &out.ObjectLockRetainUntilDate
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecretKeySecretRef != nil {
+		in, out := &in.SecretKeySecretRef, &out.SecretKeySecretRef
+		*out = new(v1.SecretKeySelector)
+		**out = **in
+	}
+	if in.Source != nil {
+		in, out := &in.Source, &out.Source
+		*out = new(string)
+		**out = **in
+	}
+	if in.SourceHash != nil {
+		in, out := &in.SourceHash, &out.SourceHash
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectParameters.
+func (in *ObjectParameters) DeepCopy() *ObjectParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec.
+func (in *ObjectSpec) DeepCopy() *ObjectSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectSpec)
+	in.DeepCopyInto(out)
+	return out
+}
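
One subtlety in the Tags copy above is worth calling out: the generated loop copies nil-valued map entries as nil entries, so a key that is present with a nil value stays distinct from an absent key after the copy. A minimal sketch, assuming it compiles inside this package with "fmt" imported:

// Illustrative only: nil map values survive DeepCopy as present-but-nil entries.
func exampleTagsCopy() {
	env := "prod"
	in := &ObjectParameters{Tags: map[string]*string{"env": &env, "todo": nil}}
	out := in.DeepCopy()
	_, present := out.Tags["todo"]
	fmt.Println(present, out.Tags["todo"] == nil) // true true
}

That distinction matters when diffing desired against observed state, where "set to nil" and "unset" can mean different things.
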
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStatus) DeepCopyInto(out *ObjectStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStatus.
+func (in *ObjectStatus) DeepCopy() *ObjectStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) {
+	*out = *in
+	if in.DefaultRetention != nil {
+		in, out := &in.DefaultRetention, &out.DefaultRetention
+		*out = make([]DefaultRetentionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters.
+func (in *RuleInitParameters) DeepCopy() *RuleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleObservation) DeepCopyInto(out *RuleObservation) {
+	*out = *in
+	if in.DefaultRetention != nil {
+		in, out := &in.DefaultRetention, &out.DefaultRetention
+		*out = make([]DefaultRetentionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation.
+func (in *RuleObservation) DeepCopy() *RuleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleParameters) DeepCopyInto(out *RuleParameters) {
+	*out = *in
+	if in.DefaultRetention != nil {
+		in, out := &in.DefaultRetention, &out.DefaultRetention
+		*out = make([]DefaultRetentionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters.
+func (in *RuleParameters) DeepCopy() *RuleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerSideEncryptionConfigurationInitParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationInitParameters) {
+	*out = *in
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = make([]ServerSideEncryptionConfigurationRuleInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationInitParameters.
+func (in *ServerSideEncryptionConfigurationInitParameters) DeepCopy() *ServerSideEncryptionConfigurationInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerSideEncryptionConfigurationInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerSideEncryptionConfigurationObservation) DeepCopyInto(out *ServerSideEncryptionConfigurationObservation) {
+	*out = *in
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = make([]ServerSideEncryptionConfigurationRuleObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationObservation.
+func (in *ServerSideEncryptionConfigurationObservation) DeepCopy() *ServerSideEncryptionConfigurationObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerSideEncryptionConfigurationObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerSideEncryptionConfigurationParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationParameters) {
+	*out = *in
+	if in.Rule != nil {
+		in, out := &in.Rule, &out.Rule
+		*out = make([]ServerSideEncryptionConfigurationRuleParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationParameters.
+func (in *ServerSideEncryptionConfigurationParameters) DeepCopy() *ServerSideEncryptionConfigurationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerSideEncryptionConfigurationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerSideEncryptionConfigurationRuleInitParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationRuleInitParameters) {
+	*out = *in
+	if in.ApplyServerSideEncryptionByDefault != nil {
+		in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault
+		*out = make([]ApplyServerSideEncryptionByDefaultInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationRuleInitParameters.
+func (in *ServerSideEncryptionConfigurationRuleInitParameters) DeepCopy() *ServerSideEncryptionConfigurationRuleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerSideEncryptionConfigurationRuleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerSideEncryptionConfigurationRuleObservation) DeepCopyInto(out *ServerSideEncryptionConfigurationRuleObservation) {
+	*out = *in
+	if in.ApplyServerSideEncryptionByDefault != nil {
+		in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault
+		*out = make([]ApplyServerSideEncryptionByDefaultObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationRuleObservation.
+func (in *ServerSideEncryptionConfigurationRuleObservation) DeepCopy() *ServerSideEncryptionConfigurationRuleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerSideEncryptionConfigurationRuleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerSideEncryptionConfigurationRuleParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationRuleParameters) {
+	*out = *in
+	if in.ApplyServerSideEncryptionByDefault != nil {
+		in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault
+		*out = make([]ApplyServerSideEncryptionByDefaultParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationRuleParameters.
+func (in *ServerSideEncryptionConfigurationRuleParameters) DeepCopy() *ServerSideEncryptionConfigurationRuleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerSideEncryptionConfigurationRuleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagInitParameters) DeepCopyInto(out *TagInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagInitParameters.
+func (in *TagInitParameters) DeepCopy() *TagInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TagInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagObservation) DeepCopyInto(out *TagObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagObservation.
+func (in *TagObservation) DeepCopy() *TagObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TagObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagParameters) DeepCopyInto(out *TagParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagParameters.
+func (in *TagParameters) DeepCopy() *TagParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TagParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransitionInitParameters) DeepCopyInto(out *TransitionInitParameters) {
+	*out = *in
+	if in.Date != nil {
+		in, out := &in.Date, &out.Date
+		*out = new(string)
+		**out = **in
+	}
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransitionInitParameters.
+func (in *TransitionInitParameters) DeepCopy() *TransitionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransitionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransitionObservation) DeepCopyInto(out *TransitionObservation) {
+	*out = *in
+	if in.Date != nil {
+		in, out := &in.Date, &out.Date
+		*out = new(string)
+		**out = **in
+	}
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransitionObservation.
+func (in *TransitionObservation) DeepCopy() *TransitionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(TransitionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TransitionParameters) DeepCopyInto(out *TransitionParameters) {
+	*out = *in
+	if in.Date != nil {
+		in, out := &in.Date, &out.Date
+		*out = new(string)
+		**out = **in
+	}
+	if in.Days != nil {
+		in, out := &in.Days, &out.Days
+		*out = new(float64)
+		**out = **in
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransitionParameters.
+func (in *TransitionParameters) DeepCopy() *TransitionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(TransitionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VersioningInitParameters) DeepCopyInto(out *VersioningInitParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningInitParameters.
+func (in *VersioningInitParameters) DeepCopy() *VersioningInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VersioningInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VersioningObservation) DeepCopyInto(out *VersioningObservation) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningObservation.
+func (in *VersioningObservation) DeepCopy() *VersioningObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(VersioningObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VersioningParameters) DeepCopyInto(out *VersioningParameters) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningParameters.
+func (in *VersioningParameters) DeepCopy() *VersioningParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(VersioningParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebsiteInitParameters) DeepCopyInto(out *WebsiteInitParameters) {
+	*out = *in
+	if in.ErrorDocument != nil {
+		in, out := &in.ErrorDocument, &out.ErrorDocument
+		*out = new(string)
+		**out = **in
+	}
+	if in.IndexDocument != nil {
+		in, out := &in.IndexDocument, &out.IndexDocument
+		*out = new(string)
+		**out = **in
+	}
+	if in.RedirectAllRequestsTo != nil {
+		in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo
+		*out = new(string)
+		**out = **in
+	}
+	if in.RoutingRules != nil {
+		in, out := &in.RoutingRules, &out.RoutingRules
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebsiteInitParameters.
+func (in *WebsiteInitParameters) DeepCopy() *WebsiteInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(WebsiteInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebsiteObservation) DeepCopyInto(out *WebsiteObservation) {
+	*out = *in
+	if in.ErrorDocument != nil {
+		in, out := &in.ErrorDocument, &out.ErrorDocument
+		*out = new(string)
+		**out = **in
+	}
+	if in.IndexDocument != nil {
+		in, out := &in.IndexDocument, &out.IndexDocument
+		*out = new(string)
+		**out = **in
+	}
+	if in.RedirectAllRequestsTo != nil {
+		in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo
+		*out = new(string)
+		**out = **in
+	}
+	if in.RoutingRules != nil {
+		in, out := &in.RoutingRules, &out.RoutingRules
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebsiteObservation.
+func (in *WebsiteObservation) DeepCopy() *WebsiteObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(WebsiteObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebsiteParameters) DeepCopyInto(out *WebsiteParameters) {
+	*out = *in
+	if in.ErrorDocument != nil {
+		in, out := &in.ErrorDocument, &out.ErrorDocument
+		*out = new(string)
+		**out = **in
+	}
+	if in.IndexDocument != nil {
+		in, out := &in.IndexDocument, &out.IndexDocument
+		*out = new(string)
+		**out = **in
+	}
+	if in.RedirectAllRequestsTo != nil {
+		in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo
+		*out = new(string)
+		**out = **in
+	}
+	if in.RoutingRules != nil {
+		in, out := &in.RoutingRules, &out.RoutingRules
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebsiteParameters.
+func (in *WebsiteParameters) DeepCopy() *WebsiteParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(WebsiteParameters)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/apis/storage/v1alpha1/zz_generated.resolvers.go b/apis/storage/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..f6a332b
--- /dev/null
+++ b/apis/storage/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,244 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	resource "github.com/crossplane/upjet/pkg/resource"
+	errors "github.com/pkg/errors"
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1"
+	v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
+	common "github.com/tagesjump/provider-upjet-yc/config/common"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this Bucket.
+func (mg *Bucket) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccessKey),
+		Extract:      common.ExtractAccessKey(),
+		Reference:    mg.Spec.ForProvider.AccessKeyRef,
+		Selector:     mg.Spec.ForProvider.AccessKeySelector,
+		To: reference.To{
+			List:    &v1alpha1.ServiceAccountStaticAccessKeyList{},
+			Managed: &v1alpha1.ServiceAccountStaticAccessKey{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.AccessKey")
+	}
+	mg.Spec.ForProvider.AccessKey = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.AccessKeyRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha11.FolderList{},
+			Managed: &v1alpha11.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	for i3 := 0; i3 < len(mg.Spec.ForProvider.Logging); i3++ {
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Logging[i3].TargetBucket),
+			Extract:      resource.ExtractResourceID(),
+			Reference:    mg.Spec.ForProvider.Logging[i3].TargetBucketRef,
+			Selector:     mg.Spec.ForProvider.Logging[i3].TargetBucketSelector,
+			To: reference.To{
+				List:    &BucketList{},
+				Managed: &Bucket{},
+			},
}) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Logging[i3].TargetBucket") + } + mg.Spec.ForProvider.Logging[i3].TargetBucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Logging[i3].TargetBucketRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.ServerSideEncryptionConfiguration); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyIDRef, + Selector: mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyIDSelector, + To: reference.To{ + List: &v1alpha12.SymmetricKeyList{}, + Managed: &v1alpha12.SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyID") + } + mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyIDRef = rsp.ResolvedReference + + } + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AccessKey), + Extract: common.ExtractAccessKey(), + Reference: mg.Spec.InitProvider.AccessKeyRef, + Selector: mg.Spec.InitProvider.AccessKeySelector, + To: reference.To{ + List: &v1alpha1.ServiceAccountStaticAccessKeyList{}, + Managed: &v1alpha1.ServiceAccountStaticAccessKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AccessKey") + } + mg.Spec.InitProvider.AccessKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AccessKeyRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha11.FolderList{}, + Managed: &v1alpha11.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Logging); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Logging[i3].TargetBucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Logging[i3].TargetBucketRef, + Selector: mg.Spec.InitProvider.Logging[i3].TargetBucketSelector, + To: reference.To{ + List: &BucketList{}, + Managed: &Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Logging[i3].TargetBucket") + } + mg.Spec.InitProvider.Logging[i3].TargetBucket = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Logging[i3].TargetBucketRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.ServerSideEncryptionConfiguration); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault); i5++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyIDRef, + Selector: mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyIDSelector, + To: reference.To{ + List: &v1alpha12.SymmetricKeyList{}, + Managed: &v1alpha12.SymmetricKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyID") + } + mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServerSideEncryptionConfiguration[i3].Rule[i4].ApplyServerSideEncryptionByDefault[i5].KMSMasterKeyIDRef = rsp.ResolvedReference + + } + } + } + + return nil +} + +// ResolveReferences of this Object. +func (mg *Object) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccessKey), + Extract: common.ExtractAccessKey(), + Reference: mg.Spec.ForProvider.AccessKeyRef, + Selector: mg.Spec.ForProvider.AccessKeySelector, + To: reference.To{ + List: &v1alpha1.ServiceAccountStaticAccessKeyList{}, + Managed: &v1alpha1.ServiceAccountStaticAccessKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccessKey") + } + mg.Spec.ForProvider.AccessKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccessKeyRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{ + List: &BucketList{}, + Managed: &Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AccessKey), + Extract: common.ExtractAccessKey(), + Reference: mg.Spec.InitProvider.AccessKeyRef, + Selector: mg.Spec.InitProvider.AccessKeySelector, + To: reference.To{ + List: &v1alpha1.ServiceAccountStaticAccessKeyList{}, + Managed: &v1alpha1.ServiceAccountStaticAccessKey{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AccessKey") + } + mg.Spec.InitProvider.AccessKey = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.AccessKeyRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{ + List: &BucketList{}, + Managed: &Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/storage/v1alpha1/zz_groupversion_info.go b/apis/storage/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..487d2f3 --- /dev/null +++ b/apis/storage/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=storage.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "storage.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/storage/v1alpha1/zz_object_terraformed.go b/apis/storage/v1alpha1/zz_object_terraformed.go new file mode 100755 index 0000000..6eb27a1 --- /dev/null +++ b/apis/storage/v1alpha1/zz_object_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
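+//
+// The methods below implement upjet's resource.Terraformed interface for
+// Object: they translate between the Kubernetes representation of the
+// resource (spec.forProvider, spec.initProvider, status.atProvider) and its
+// Terraform representation (configuration and state).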
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Object
+func (mg *Object) GetTerraformResourceType() string {
+	return "yandex_storage_object"
+}
+
+// GetConnectionDetailsMapping for this Object
+func (tr *Object) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"secret_key": "secretKeySecretRef"}
+}
+
+// GetObservation of this Object
+func (tr *Object) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Object
+func (tr *Object) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Object
+func (tr *Object) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Object
+func (tr *Object) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Object
+func (tr *Object) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Object
+func (tr *Object) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Object
+func (tr *Object) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, as we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Object using its observed tfState.
+// It returns true if there are any spec changes for the resource.
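+// Late-initialization parses the observed Terraform state in attrs into an
+// ObjectParameters value and copies it into any unset fields of
+// spec.forProvider, so defaults chosen by the provider are reflected in the
+// spec.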
+func (tr *Object) LateInitialize(attrs []byte) (bool, error) {
+	params := &ObjectParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Object) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/storage/v1alpha1/zz_object_types.go b/apis/storage/v1alpha1/zz_object_types.go
new file mode 100755
index 0000000..7caffb4
--- /dev/null
+++ b/apis/storage/v1alpha1/zz_object_types.go
@@ -0,0 +1,262 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type ObjectInitParameters struct {
+
+	// The predefined ACL to apply. Defaults to private.
+	ACL *string `json:"acl,omitempty" tf:"acl,omitempty"`
+
+	// The access key to use when applying changes. If omitted, the storage_access_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccountStaticAccessKey
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractAccessKey()
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Reference to a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeyRef *v1.Reference `json:"accessKeyRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeySelector *v1.Selector `json:"accessKeySelector,omitempty" tf:"-"`
+
+	// The name of the containing bucket.
+	// +crossplane:generate:reference:type=Bucket
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+
+	// Reference to a Bucket to populate bucket.
+	// +kubebuilder:validation:Optional
+	BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"`
+
+	// Selector for a Bucket to populate bucket.
+	// +kubebuilder:validation:Optional
+	BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"`
+
+	// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
+	Content *string `json:"content,omitempty" tf:"content,omitempty"`
+
+	// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the gzipbase64 function with small text strings. For larger objects, use source to stream the content from a disk file.
+	ContentBase64 *string `json:"contentBase64,omitempty" tf:"content_base64,omitempty"`
+
+	// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All valid MIME types are accepted for this input.
+	ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"`
+
+	// The name of the object once it is in the bucket.
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
+
+	// Specifies a legal hold status of an object. Requires object_lock_configuration to be enabled on a bucket.
+	ObjectLockLegalHoldStatus *string `json:"objectLockLegalHoldStatus,omitempty" tf:"object_lock_legal_hold_status,omitempty"`
+
+	// Specifies a type of object lock. One of ["GOVERNANCE", "COMPLIANCE"]. It must be set simultaneously with object_lock_retain_until_date. Requires object_lock_configuration to be enabled on a bucket.
+	ObjectLockMode *string `json:"objectLockMode,omitempty" tf:"object_lock_mode,omitempty"`
+
+	// Specifies date and time in RFC3339 format until which an object is to be locked. It must be set simultaneously with object_lock_mode. Requires object_lock_configuration to be enabled on a bucket.
+	ObjectLockRetainUntilDate *string `json:"objectLockRetainUntilDate,omitempty" tf:"object_lock_retain_until_date,omitempty"`
+
+	// The secret key to use when applying changes. If omitted, the storage_secret_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	SecretKeySecretRef *v1.SecretKeySelector `json:"secretKeySecretRef,omitempty" tf:"-"`
+
+	// The path to a file that will be read and uploaded as raw bytes for the object content.
+	Source *string `json:"source,omitempty" tf:"source,omitempty"`
+
+	// Used to trigger an object update when the source content changes. The only meaningful value is filemd5("path/to/source"). (The value is only stored in state and not saved by Yandex Storage.)
+	SourceHash *string `json:"sourceHash,omitempty" tf:"source_hash,omitempty"`
+
+	// Specifies object tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type ObjectObservation struct {
+
+	// The predefined ACL to apply. Defaults to private.
+	ACL *string `json:"acl,omitempty" tf:"acl,omitempty"`
+
+	// The access key to use when applying changes. If omitted, the storage_access_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// The name of the containing bucket.
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+
+	// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
+	Content *string `json:"content,omitempty" tf:"content,omitempty"`
+
+	// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the gzipbase64 function with small text strings. For larger objects, use source to stream the content from a disk file.
+	ContentBase64 *string `json:"contentBase64,omitempty" tf:"content_base64,omitempty"`
+
+	// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All valid MIME types are accepted for this input.
+	ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"`
+
+	// The key of the resource.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// The name of the object once it is in the bucket.
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
+
+	// Specifies a legal hold status of an object. Requires object_lock_configuration to be enabled on a bucket.
+	ObjectLockLegalHoldStatus *string `json:"objectLockLegalHoldStatus,omitempty" tf:"object_lock_legal_hold_status,omitempty"`
+
+	// Specifies a type of object lock. One of ["GOVERNANCE", "COMPLIANCE"]. It must be set simultaneously with object_lock_retain_until_date. Requires object_lock_configuration to be enabled on a bucket.
+	ObjectLockMode *string `json:"objectLockMode,omitempty" tf:"object_lock_mode,omitempty"`
+
+	// Specifies date and time in RFC3339 format until which an object is to be locked. It must be set simultaneously with object_lock_mode. Requires object_lock_configuration to be enabled on a bucket.
+	ObjectLockRetainUntilDate *string `json:"objectLockRetainUntilDate,omitempty" tf:"object_lock_retain_until_date,omitempty"`
+
+	// The path to a file that will be read and uploaded as raw bytes for the object content.
+	Source *string `json:"source,omitempty" tf:"source,omitempty"`
+
+	// Used to trigger an object update when the source content changes. The only meaningful value is filemd5("path/to/source"). (The value is only stored in state and not saved by Yandex Storage.)
+	SourceHash *string `json:"sourceHash,omitempty" tf:"source_hash,omitempty"`
+
+	// Specifies object tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type ObjectParameters struct {
+
+	// The predefined ACL to apply. Defaults to private.
+	// +kubebuilder:validation:Optional
+	ACL *string `json:"acl,omitempty" tf:"acl,omitempty"`
+
+	// The access key to use when applying changes. If omitted, the storage_access_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccountStaticAccessKey
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/common.ExtractAccessKey()
+	// +kubebuilder:validation:Optional
+	AccessKey *string `json:"accessKey,omitempty" tf:"access_key,omitempty"`
+
+	// Reference to a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeyRef *v1.Reference `json:"accessKeyRef,omitempty" tf:"-"`
+
+	// Selector for a ServiceAccountStaticAccessKey in iam to populate accessKey.
+	// +kubebuilder:validation:Optional
+	AccessKeySelector *v1.Selector `json:"accessKeySelector,omitempty" tf:"-"`
+
+	// The name of the containing bucket.
+	// +crossplane:generate:reference:type=Bucket
+	// +kubebuilder:validation:Optional
+	Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"`
+
+	// Reference to a Bucket to populate bucket.
+	// +kubebuilder:validation:Optional
+	BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"`
+
+	// Selector for a Bucket to populate bucket.
+	// +kubebuilder:validation:Optional
+	BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"`
+
+	// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
+	// +kubebuilder:validation:Optional
+	Content *string `json:"content,omitempty" tf:"content,omitempty"`
+
+	// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the gzipbase64 function with small text strings. For larger objects, use source to stream the content from a disk file.
+	// +kubebuilder:validation:Optional
+	ContentBase64 *string `json:"contentBase64,omitempty" tf:"content_base64,omitempty"`
+
+	// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All valid MIME types are accepted for this input.
+	// +kubebuilder:validation:Optional
+	ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"`
+
+	// The name of the object once it is in the bucket.
+	// +kubebuilder:validation:Optional
+	Key *string `json:"key,omitempty" tf:"key,omitempty"`
+
+	// Specifies a legal hold status of an object. Requires object_lock_configuration to be enabled on a bucket.
+	// +kubebuilder:validation:Optional
+	ObjectLockLegalHoldStatus *string `json:"objectLockLegalHoldStatus,omitempty" tf:"object_lock_legal_hold_status,omitempty"`
+
+	// Specifies a type of object lock. One of ["GOVERNANCE", "COMPLIANCE"]. It must be set simultaneously with object_lock_retain_until_date. Requires object_lock_configuration to be enabled on a bucket.
+	// +kubebuilder:validation:Optional
+	ObjectLockMode *string `json:"objectLockMode,omitempty" tf:"object_lock_mode,omitempty"`
+
+	// Specifies date and time in RFC3339 format until which an object is to be locked. It must be set simultaneously with object_lock_mode. Requires object_lock_configuration to be enabled on a bucket.
+	// +kubebuilder:validation:Optional
+	ObjectLockRetainUntilDate *string `json:"objectLockRetainUntilDate,omitempty" tf:"object_lock_retain_until_date,omitempty"`
+
+	// The secret key to use when applying changes. If omitted, the storage_secret_key specified in the provider config (explicitly or within shared_credentials_file) is used.
+	// +kubebuilder:validation:Optional
+	SecretKeySecretRef *v1.SecretKeySelector `json:"secretKeySecretRef,omitempty" tf:"-"`
+
+	// The path to a file that will be read and uploaded as raw bytes for the object content.
+	// +kubebuilder:validation:Optional
+	Source *string `json:"source,omitempty" tf:"source,omitempty"`
+
+	// Used to trigger an object update when the source content changes. The only meaningful value is filemd5("path/to/source"). (The value is only stored in state and not saved by Yandex Storage.)
+	// +kubebuilder:validation:Optional
+	SourceHash *string `json:"sourceHash,omitempty" tf:"source_hash,omitempty"`
+
+	// Specifies object tags.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+// ObjectSpec defines the desired state of Object
+type ObjectSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider ObjectParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not want to update them after creation,
+	// for example because an external controller, such as an autoscaler, is
+	// managing them.
+	InitProvider ObjectInitParameters `json:"initProvider,omitempty"`
+}
+
+// ObjectStatus defines the observed state of Object.
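+// AtProvider holds the state most recently observed from Terraform, as
+// written by SetObservation.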
+type ObjectStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ObjectObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Object is the Schema for the Objects API. Allows management of a Yandex.Cloud Storage Object. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Object struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.key) || (has(self.initProvider) && has(self.initProvider.key))",message="spec.forProvider.key is a required parameter" + Spec ObjectSpec `json:"spec"` + Status ObjectStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ObjectList contains a list of Objects +type ObjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Object `json:"items"` +} + +// Repository type metadata. +var ( + Object_Kind = "Object" + Object_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Object_Kind}.String() + Object_KindAPIVersion = Object_Kind + "." + CRDGroupVersion.String() + Object_GroupVersionKind = CRDGroupVersion.WithKind(Object_Kind) +) + +func init() { + SchemeBuilder.Register(&Object{}, &ObjectList{}) +} diff --git a/apis/sws/v1alpha1/zz_generated.conversion_hubs.go b/apis/sws/v1alpha1/zz_generated.conversion_hubs.go index 6fe2c54..9941563 100755 --- a/apis/sws/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/sws/v1alpha1/zz_generated.conversion_hubs.go @@ -1,10 +1,6 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 - - // Hub marks this type as a conversion hub. - func (tr *SecurityProfile) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *SecurityProfile) Hub() {} diff --git a/apis/sws/v1alpha1/zz_generated.deepcopy.go b/apis/sws/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..efec01e --- /dev/null +++ b/apis/sws/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,5720 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthoritiesInitParameters) DeepCopyInto(out *AuthoritiesInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthoritiesInitParameters. +func (in *AuthoritiesInitParameters) DeepCopy() *AuthoritiesInitParameters { + if in == nil { + return nil + } + out := new(AuthoritiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthoritiesObservation) DeepCopyInto(out *AuthoritiesObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthoritiesObservation. +func (in *AuthoritiesObservation) DeepCopy() *AuthoritiesObservation { + if in == nil { + return nil + } + out := new(AuthoritiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthoritiesParameters) DeepCopyInto(out *AuthoritiesParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthoritiesParameters. 
+func (in *AuthoritiesParameters) DeepCopy() *AuthoritiesParameters { + if in == nil { + return nil + } + out := new(AuthoritiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorityAuthoritiesInitParameters) DeepCopyInto(out *AuthorityAuthoritiesInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorityAuthoritiesInitParameters. +func (in *AuthorityAuthoritiesInitParameters) DeepCopy() *AuthorityAuthoritiesInitParameters { + if in == nil { + return nil + } + out := new(AuthorityAuthoritiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorityAuthoritiesObservation) DeepCopyInto(out *AuthorityAuthoritiesObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorityAuthoritiesObservation. +func (in *AuthorityAuthoritiesObservation) DeepCopy() *AuthorityAuthoritiesObservation { + if in == nil { + return nil + } + out := new(AuthorityAuthoritiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorityAuthoritiesParameters) DeepCopyInto(out *AuthorityAuthoritiesParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorityAuthoritiesParameters. +func (in *AuthorityAuthoritiesParameters) DeepCopy() *AuthorityAuthoritiesParameters { + if in == nil { + return nil + } + out := new(AuthorityAuthoritiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorityInitParameters) DeepCopyInto(out *AuthorityInitParameters) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]AuthoritiesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorityInitParameters. +func (in *AuthorityInitParameters) DeepCopy() *AuthorityInitParameters { + if in == nil { + return nil + } + out := new(AuthorityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorityObservation) DeepCopyInto(out *AuthorityObservation) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]AuthoritiesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorityObservation. +func (in *AuthorityObservation) DeepCopy() *AuthorityObservation { + if in == nil { + return nil + } + out := new(AuthorityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorityParameters) DeepCopyInto(out *AuthorityParameters) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]AuthoritiesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorityParameters. +func (in *AuthorityParameters) DeepCopy() *AuthorityParameters { + if in == nil { + return nil + } + out := new(AuthorityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionAuthorityAuthoritiesInitParameters) DeepCopyInto(out *ConditionAuthorityAuthoritiesInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAuthorityAuthoritiesInitParameters. +func (in *ConditionAuthorityAuthoritiesInitParameters) DeepCopy() *ConditionAuthorityAuthoritiesInitParameters { + if in == nil { + return nil + } + out := new(ConditionAuthorityAuthoritiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAuthorityAuthoritiesObservation) DeepCopyInto(out *ConditionAuthorityAuthoritiesObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAuthorityAuthoritiesObservation. +func (in *ConditionAuthorityAuthoritiesObservation) DeepCopy() *ConditionAuthorityAuthoritiesObservation { + if in == nil { + return nil + } + out := new(ConditionAuthorityAuthoritiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionAuthorityAuthoritiesParameters) DeepCopyInto(out *ConditionAuthorityAuthoritiesParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAuthorityAuthoritiesParameters. +func (in *ConditionAuthorityAuthoritiesParameters) DeepCopy() *ConditionAuthorityAuthoritiesParameters { + if in == nil { + return nil + } + out := new(ConditionAuthorityAuthoritiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAuthorityInitParameters) DeepCopyInto(out *ConditionAuthorityInitParameters) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]AuthorityAuthoritiesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAuthorityInitParameters. +func (in *ConditionAuthorityInitParameters) DeepCopy() *ConditionAuthorityInitParameters { + if in == nil { + return nil + } + out := new(ConditionAuthorityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAuthorityObservation) DeepCopyInto(out *ConditionAuthorityObservation) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]AuthorityAuthoritiesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAuthorityObservation. +func (in *ConditionAuthorityObservation) DeepCopy() *ConditionAuthorityObservation { + if in == nil { + return nil + } + out := new(ConditionAuthorityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAuthorityParameters) DeepCopyInto(out *ConditionAuthorityParameters) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]AuthorityAuthoritiesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAuthorityParameters. 
+func (in *ConditionAuthorityParameters) DeepCopy() *ConditionAuthorityParameters { + if in == nil { + return nil + } + out := new(ConditionAuthorityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHTTPMethodHTTPMethodsInitParameters) DeepCopyInto(out *ConditionHTTPMethodHTTPMethodsInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHTTPMethodHTTPMethodsInitParameters. +func (in *ConditionHTTPMethodHTTPMethodsInitParameters) DeepCopy() *ConditionHTTPMethodHTTPMethodsInitParameters { + if in == nil { + return nil + } + out := new(ConditionHTTPMethodHTTPMethodsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHTTPMethodHTTPMethodsObservation) DeepCopyInto(out *ConditionHTTPMethodHTTPMethodsObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHTTPMethodHTTPMethodsObservation. +func (in *ConditionHTTPMethodHTTPMethodsObservation) DeepCopy() *ConditionHTTPMethodHTTPMethodsObservation { + if in == nil { + return nil + } + out := new(ConditionHTTPMethodHTTPMethodsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionHTTPMethodHTTPMethodsParameters) DeepCopyInto(out *ConditionHTTPMethodHTTPMethodsParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHTTPMethodHTTPMethodsParameters. +func (in *ConditionHTTPMethodHTTPMethodsParameters) DeepCopy() *ConditionHTTPMethodHTTPMethodsParameters { + if in == nil { + return nil + } + out := new(ConditionHTTPMethodHTTPMethodsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHTTPMethodInitParameters) DeepCopyInto(out *ConditionHTTPMethodInitParameters) { + *out = *in + if in.HTTPMethods != nil { + in, out := &in.HTTPMethods, &out.HTTPMethods + *out = make([]HTTPMethodHTTPMethodsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHTTPMethodInitParameters. +func (in *ConditionHTTPMethodInitParameters) DeepCopy() *ConditionHTTPMethodInitParameters { + if in == nil { + return nil + } + out := new(ConditionHTTPMethodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHTTPMethodObservation) DeepCopyInto(out *ConditionHTTPMethodObservation) { + *out = *in + if in.HTTPMethods != nil { + in, out := &in.HTTPMethods, &out.HTTPMethods + *out = make([]HTTPMethodHTTPMethodsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHTTPMethodObservation. +func (in *ConditionHTTPMethodObservation) DeepCopy() *ConditionHTTPMethodObservation { + if in == nil { + return nil + } + out := new(ConditionHTTPMethodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHTTPMethodParameters) DeepCopyInto(out *ConditionHTTPMethodParameters) { + *out = *in + if in.HTTPMethods != nil { + in, out := &in.HTTPMethods, &out.HTTPMethods + *out = make([]HTTPMethodHTTPMethodsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHTTPMethodParameters. 
+func (in *ConditionHTTPMethodParameters) DeepCopy() *ConditionHTTPMethodParameters { + if in == nil { + return nil + } + out := new(ConditionHTTPMethodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHeadersInitParameters) DeepCopyInto(out *ConditionHeadersInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]HeadersValueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHeadersInitParameters. +func (in *ConditionHeadersInitParameters) DeepCopy() *ConditionHeadersInitParameters { + if in == nil { + return nil + } + out := new(ConditionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHeadersObservation) DeepCopyInto(out *ConditionHeadersObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]HeadersValueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHeadersObservation. +func (in *ConditionHeadersObservation) DeepCopy() *ConditionHeadersObservation { + if in == nil { + return nil + } + out := new(ConditionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHeadersParameters) DeepCopyInto(out *ConditionHeadersParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]HeadersValueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHeadersParameters. +func (in *ConditionHeadersParameters) DeepCopy() *ConditionHeadersParameters { + if in == nil { + return nil + } + out := new(ConditionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionHeadersValueInitParameters) DeepCopyInto(out *ConditionHeadersValueInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHeadersValueInitParameters. +func (in *ConditionHeadersValueInitParameters) DeepCopy() *ConditionHeadersValueInitParameters { + if in == nil { + return nil + } + out := new(ConditionHeadersValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionHeadersValueObservation) DeepCopyInto(out *ConditionHeadersValueObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHeadersValueObservation. +func (in *ConditionHeadersValueObservation) DeepCopy() *ConditionHeadersValueObservation { + if in == nil { + return nil + } + out := new(ConditionHeadersValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionHeadersValueParameters) DeepCopyInto(out *ConditionHeadersValueParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionHeadersValueParameters. +func (in *ConditionHeadersValueParameters) DeepCopy() *ConditionHeadersValueParameters { + if in == nil { + return nil + } + out := new(ConditionHeadersValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = make([]AuthorityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]HTTPMethodInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]RequestURIInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = make([]SourceIPInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. +func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]AuthorityObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTPMethod != nil {
+		in, out := &in.HTTPMethod, &out.HTTPMethod
+		*out = make([]HTTPMethodObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]HeadersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RequestURI != nil {
+		in, out := &in.RequestURI, &out.RequestURI
+		*out = make([]RequestURIObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]SourceIPObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation.
+func (in *ConditionObservation) DeepCopy() *ConditionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]AuthorityParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTPMethod != nil {
+		in, out := &in.HTTPMethod, &out.HTTPMethod
+		*out = make([]HTTPMethodParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]HeadersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RequestURI != nil {
+		in, out := &in.RequestURI, &out.RequestURI
+		*out = make([]RequestURIParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]SourceIPParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters.
+func (in *ConditionParameters) DeepCopy() *ConditionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIInitParameters) DeepCopyInto(out *ConditionRequestURIInitParameters) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = make([]RequestURIPathInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = make([]RequestURIQueriesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIInitParameters.
+func (in *ConditionRequestURIInitParameters) DeepCopy() *ConditionRequestURIInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIObservation) DeepCopyInto(out *ConditionRequestURIObservation) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = make([]RequestURIPathObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = make([]RequestURIQueriesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIObservation.
+func (in *ConditionRequestURIObservation) DeepCopy() *ConditionRequestURIObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIParameters) DeepCopyInto(out *ConditionRequestURIParameters) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = make([]RequestURIPathParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = make([]RequestURIQueriesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIParameters.
+func (in *ConditionRequestURIParameters) DeepCopy() *ConditionRequestURIParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIPathInitParameters) DeepCopyInto(out *ConditionRequestURIPathInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIPathInitParameters.
+func (in *ConditionRequestURIPathInitParameters) DeepCopy() *ConditionRequestURIPathInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIPathInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIPathObservation) DeepCopyInto(out *ConditionRequestURIPathObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIPathObservation.
+func (in *ConditionRequestURIPathObservation) DeepCopy() *ConditionRequestURIPathObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIPathObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIPathParameters) DeepCopyInto(out *ConditionRequestURIPathParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIPathParameters.
+func (in *ConditionRequestURIPathParameters) DeepCopy() *ConditionRequestURIPathParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIPathParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIQueriesInitParameters) DeepCopyInto(out *ConditionRequestURIQueriesInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ConditionRequestURIQueriesValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIQueriesInitParameters.
+func (in *ConditionRequestURIQueriesInitParameters) DeepCopy() *ConditionRequestURIQueriesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIQueriesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIQueriesObservation) DeepCopyInto(out *ConditionRequestURIQueriesObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ConditionRequestURIQueriesValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIQueriesObservation.
+func (in *ConditionRequestURIQueriesObservation) DeepCopy() *ConditionRequestURIQueriesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIQueriesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIQueriesParameters) DeepCopyInto(out *ConditionRequestURIQueriesParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ConditionRequestURIQueriesValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIQueriesParameters.
+func (in *ConditionRequestURIQueriesParameters) DeepCopy() *ConditionRequestURIQueriesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIQueriesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIQueriesValueInitParameters) DeepCopyInto(out *ConditionRequestURIQueriesValueInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIQueriesValueInitParameters.
+func (in *ConditionRequestURIQueriesValueInitParameters) DeepCopy() *ConditionRequestURIQueriesValueInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIQueriesValueInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIQueriesValueObservation) DeepCopyInto(out *ConditionRequestURIQueriesValueObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIQueriesValueObservation.
+func (in *ConditionRequestURIQueriesValueObservation) DeepCopy() *ConditionRequestURIQueriesValueObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIQueriesValueObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionRequestURIQueriesValueParameters) DeepCopyInto(out *ConditionRequestURIQueriesValueParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionRequestURIQueriesValueParameters.
+func (in *ConditionRequestURIQueriesValueParameters) DeepCopy() *ConditionRequestURIQueriesValueParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionRequestURIQueriesValueParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPGeoIPMatchInitParameters) DeepCopyInto(out *ConditionSourceIPGeoIPMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPGeoIPMatchInitParameters.
+func (in *ConditionSourceIPGeoIPMatchInitParameters) DeepCopy() *ConditionSourceIPGeoIPMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPGeoIPMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPGeoIPMatchObservation) DeepCopyInto(out *ConditionSourceIPGeoIPMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPGeoIPMatchObservation.
+func (in *ConditionSourceIPGeoIPMatchObservation) DeepCopy() *ConditionSourceIPGeoIPMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPGeoIPMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPGeoIPMatchParameters) DeepCopyInto(out *ConditionSourceIPGeoIPMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPGeoIPMatchParameters.
+func (in *ConditionSourceIPGeoIPMatchParameters) DeepCopy() *ConditionSourceIPGeoIPMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPGeoIPMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPGeoIPNotMatchInitParameters) DeepCopyInto(out *ConditionSourceIPGeoIPNotMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPGeoIPNotMatchInitParameters.
+func (in *ConditionSourceIPGeoIPNotMatchInitParameters) DeepCopy() *ConditionSourceIPGeoIPNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPGeoIPNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPGeoIPNotMatchObservation) DeepCopyInto(out *ConditionSourceIPGeoIPNotMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPGeoIPNotMatchObservation.
+func (in *ConditionSourceIPGeoIPNotMatchObservation) DeepCopy() *ConditionSourceIPGeoIPNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPGeoIPNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPGeoIPNotMatchParameters) DeepCopyInto(out *ConditionSourceIPGeoIPNotMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPGeoIPNotMatchParameters.
+func (in *ConditionSourceIPGeoIPNotMatchParameters) DeepCopy() *ConditionSourceIPGeoIPNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPGeoIPNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPIPRangesMatchInitParameters) DeepCopyInto(out *ConditionSourceIPIPRangesMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPIPRangesMatchInitParameters.
+func (in *ConditionSourceIPIPRangesMatchInitParameters) DeepCopy() *ConditionSourceIPIPRangesMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPIPRangesMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPIPRangesMatchObservation) DeepCopyInto(out *ConditionSourceIPIPRangesMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPIPRangesMatchObservation.
+func (in *ConditionSourceIPIPRangesMatchObservation) DeepCopy() *ConditionSourceIPIPRangesMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPIPRangesMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPIPRangesMatchParameters) DeepCopyInto(out *ConditionSourceIPIPRangesMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPIPRangesMatchParameters.
+func (in *ConditionSourceIPIPRangesMatchParameters) DeepCopy() *ConditionSourceIPIPRangesMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPIPRangesMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPIPRangesNotMatchInitParameters) DeepCopyInto(out *ConditionSourceIPIPRangesNotMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPIPRangesNotMatchInitParameters.
+func (in *ConditionSourceIPIPRangesNotMatchInitParameters) DeepCopy() *ConditionSourceIPIPRangesNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPIPRangesNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPIPRangesNotMatchObservation) DeepCopyInto(out *ConditionSourceIPIPRangesNotMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPIPRangesNotMatchObservation.
+func (in *ConditionSourceIPIPRangesNotMatchObservation) DeepCopy() *ConditionSourceIPIPRangesNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPIPRangesNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPIPRangesNotMatchParameters) DeepCopyInto(out *ConditionSourceIPIPRangesNotMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPIPRangesNotMatchParameters.
+func (in *ConditionSourceIPIPRangesNotMatchParameters) DeepCopy() *ConditionSourceIPIPRangesNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPIPRangesNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPInitParameters) DeepCopyInto(out *ConditionSourceIPInitParameters) {
+	*out = *in
+	if in.GeoIPMatch != nil {
+		in, out := &in.GeoIPMatch, &out.GeoIPMatch
+		*out = make([]SourceIPGeoIPMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GeoIPNotMatch != nil {
+		in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch
+		*out = make([]SourceIPGeoIPNotMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesMatch != nil {
+		in, out := &in.IPRangesMatch, &out.IPRangesMatch
+		*out = make([]SourceIPIPRangesMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesNotMatch != nil {
+		in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch
+		*out = make([]SourceIPIPRangesNotMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPInitParameters.
+func (in *ConditionSourceIPInitParameters) DeepCopy() *ConditionSourceIPInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPObservation) DeepCopyInto(out *ConditionSourceIPObservation) {
+	*out = *in
+	if in.GeoIPMatch != nil {
+		in, out := &in.GeoIPMatch, &out.GeoIPMatch
+		*out = make([]SourceIPGeoIPMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GeoIPNotMatch != nil {
+		in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch
+		*out = make([]SourceIPGeoIPNotMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesMatch != nil {
+		in, out := &in.IPRangesMatch, &out.IPRangesMatch
+		*out = make([]SourceIPIPRangesMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesNotMatch != nil {
+		in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch
+		*out = make([]SourceIPIPRangesNotMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPObservation.
+func (in *ConditionSourceIPObservation) DeepCopy() *ConditionSourceIPObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionSourceIPParameters) DeepCopyInto(out *ConditionSourceIPParameters) {
+	*out = *in
+	if in.GeoIPMatch != nil {
+		in, out := &in.GeoIPMatch, &out.GeoIPMatch
+		*out = make([]SourceIPGeoIPMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GeoIPNotMatch != nil {
+		in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch
+		*out = make([]SourceIPGeoIPNotMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesMatch != nil {
+		in, out := &in.IPRangesMatch, &out.IPRangesMatch
+		*out = make([]SourceIPIPRangesMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesNotMatch != nil {
+		in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch
+		*out = make([]SourceIPIPRangesNotMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSourceIPParameters.
+func (in *ConditionSourceIPParameters) DeepCopy() *ConditionSourceIPParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionSourceIPParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPMatchInitParameters) DeepCopyInto(out *GeoIPMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPMatchInitParameters.
+func (in *GeoIPMatchInitParameters) DeepCopy() *GeoIPMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPMatchObservation) DeepCopyInto(out *GeoIPMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPMatchObservation.
+func (in *GeoIPMatchObservation) DeepCopy() *GeoIPMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPMatchParameters) DeepCopyInto(out *GeoIPMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPMatchParameters.
+func (in *GeoIPMatchParameters) DeepCopy() *GeoIPMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPNotMatchInitParameters) DeepCopyInto(out *GeoIPNotMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPNotMatchInitParameters.
+func (in *GeoIPNotMatchInitParameters) DeepCopy() *GeoIPNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPNotMatchObservation) DeepCopyInto(out *GeoIPNotMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPNotMatchObservation.
+func (in *GeoIPNotMatchObservation) DeepCopy() *GeoIPNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GeoIPNotMatchParameters) DeepCopyInto(out *GeoIPNotMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoIPNotMatchParameters.
+func (in *GeoIPNotMatchParameters) DeepCopy() *GeoIPNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GeoIPNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodHTTPMethodsInitParameters) DeepCopyInto(out *HTTPMethodHTTPMethodsInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodHTTPMethodsInitParameters.
+func (in *HTTPMethodHTTPMethodsInitParameters) DeepCopy() *HTTPMethodHTTPMethodsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodHTTPMethodsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodHTTPMethodsObservation) DeepCopyInto(out *HTTPMethodHTTPMethodsObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodHTTPMethodsObservation.
+func (in *HTTPMethodHTTPMethodsObservation) DeepCopy() *HTTPMethodHTTPMethodsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodHTTPMethodsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodHTTPMethodsParameters) DeepCopyInto(out *HTTPMethodHTTPMethodsParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodHTTPMethodsParameters.
+func (in *HTTPMethodHTTPMethodsParameters) DeepCopy() *HTTPMethodHTTPMethodsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodHTTPMethodsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodInitParameters) DeepCopyInto(out *HTTPMethodInitParameters) {
+	*out = *in
+	if in.HTTPMethods != nil {
+		in, out := &in.HTTPMethods, &out.HTTPMethods
+		*out = make([]HTTPMethodsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodInitParameters.
+func (in *HTTPMethodInitParameters) DeepCopy() *HTTPMethodInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodObservation) DeepCopyInto(out *HTTPMethodObservation) {
+	*out = *in
+	if in.HTTPMethods != nil {
+		in, out := &in.HTTPMethods, &out.HTTPMethods
+		*out = make([]HTTPMethodsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodObservation.
+func (in *HTTPMethodObservation) DeepCopy() *HTTPMethodObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodParameters) DeepCopyInto(out *HTTPMethodParameters) {
+	*out = *in
+	if in.HTTPMethods != nil {
+		in, out := &in.HTTPMethods, &out.HTTPMethods
+		*out = make([]HTTPMethodsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodParameters.
+func (in *HTTPMethodParameters) DeepCopy() *HTTPMethodParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodsInitParameters) DeepCopyInto(out *HTTPMethodsInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodsInitParameters.
+func (in *HTTPMethodsInitParameters) DeepCopy() *HTTPMethodsInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodsInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodsObservation) DeepCopyInto(out *HTTPMethodsObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodsObservation.
+func (in *HTTPMethodsObservation) DeepCopy() *HTTPMethodsObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodsObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPMethodsParameters) DeepCopyInto(out *HTTPMethodsParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMethodsParameters.
+func (in *HTTPMethodsParameters) DeepCopy() *HTTPMethodsParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPMethodsParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersInitParameters) DeepCopyInto(out *HeadersInitParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersInitParameters.
+func (in *HeadersInitParameters) DeepCopy() *HeadersInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersObservation) DeepCopyInto(out *HeadersObservation) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersObservation.
+func (in *HeadersObservation) DeepCopy() *HeadersObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersParameters) DeepCopyInto(out *HeadersParameters) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]ValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersParameters.
+func (in *HeadersParameters) DeepCopy() *HeadersParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersValueInitParameters) DeepCopyInto(out *HeadersValueInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersValueInitParameters.
+func (in *HeadersValueInitParameters) DeepCopy() *HeadersValueInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersValueInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersValueObservation) DeepCopyInto(out *HeadersValueObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersValueObservation.
+func (in *HeadersValueObservation) DeepCopy() *HeadersValueObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersValueObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeadersValueParameters) DeepCopyInto(out *HeadersValueParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersValueParameters.
+func (in *HeadersValueParameters) DeepCopy() *HeadersValueParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(HeadersValueParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesMatchInitParameters) DeepCopyInto(out *IPRangesMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesMatchInitParameters.
+func (in *IPRangesMatchInitParameters) DeepCopy() *IPRangesMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesMatchObservation) DeepCopyInto(out *IPRangesMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesMatchObservation.
+func (in *IPRangesMatchObservation) DeepCopy() *IPRangesMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesMatchParameters) DeepCopyInto(out *IPRangesMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesMatchParameters.
+func (in *IPRangesMatchParameters) DeepCopy() *IPRangesMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesNotMatchInitParameters) DeepCopyInto(out *IPRangesNotMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesNotMatchInitParameters.
+func (in *IPRangesNotMatchInitParameters) DeepCopy() *IPRangesNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesNotMatchObservation) DeepCopyInto(out *IPRangesNotMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesNotMatchObservation.
+func (in *IPRangesNotMatchObservation) DeepCopy() *IPRangesNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPRangesNotMatchParameters) DeepCopyInto(out *IPRangesNotMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRangesNotMatchParameters.
+func (in *IPRangesNotMatchParameters) DeepCopy() *IPRangesNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(IPRangesNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathInitParameters) DeepCopyInto(out *PathInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathInitParameters.
+func (in *PathInitParameters) DeepCopy() *PathInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PathInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathObservation) DeepCopyInto(out *PathObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathObservation.
+func (in *PathObservation) DeepCopy() *PathObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PathObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PathParameters) DeepCopyInto(out *PathParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathParameters.
+func (in *PathParameters) DeepCopy() *PathParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PathParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueriesInitParameters) DeepCopyInto(out *QueriesInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]QueriesValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesInitParameters.
+func (in *QueriesInitParameters) DeepCopy() *QueriesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueriesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueriesObservation) DeepCopyInto(out *QueriesObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]QueriesValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesObservation.
+func (in *QueriesObservation) DeepCopy() *QueriesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueriesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueriesParameters) DeepCopyInto(out *QueriesParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]QueriesValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesParameters.
+func (in *QueriesParameters) DeepCopy() *QueriesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueriesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueriesValueInitParameters) DeepCopyInto(out *QueriesValueInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesValueInitParameters.
+func (in *QueriesValueInitParameters) DeepCopy() *QueriesValueInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueriesValueInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueriesValueObservation) DeepCopyInto(out *QueriesValueObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesValueObservation.
+func (in *QueriesValueObservation) DeepCopy() *QueriesValueObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(QueriesValueObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueriesValueParameters) DeepCopyInto(out *QueriesValueParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueriesValueParameters.
+func (in *QueriesValueParameters) DeepCopy() *QueriesValueParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(QueriesValueParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIInitParameters) DeepCopyInto(out *RequestURIInitParameters) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = make([]PathInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = make([]QueriesInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIInitParameters.
+func (in *RequestURIInitParameters) DeepCopy() *RequestURIInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIObservation) DeepCopyInto(out *RequestURIObservation) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = make([]PathObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = make([]QueriesObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIObservation.
+func (in *RequestURIObservation) DeepCopy() *RequestURIObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIParameters) DeepCopyInto(out *RequestURIParameters) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = make([]PathParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Queries != nil {
+		in, out := &in.Queries, &out.Queries
+		*out = make([]QueriesParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIParameters.
+func (in *RequestURIParameters) DeepCopy() *RequestURIParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIPathInitParameters) DeepCopyInto(out *RequestURIPathInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIPathInitParameters.
+func (in *RequestURIPathInitParameters) DeepCopy() *RequestURIPathInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIPathInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIPathObservation) DeepCopyInto(out *RequestURIPathObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIPathObservation.
+func (in *RequestURIPathObservation) DeepCopy() *RequestURIPathObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIPathObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIPathParameters) DeepCopyInto(out *RequestURIPathParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIPathParameters.
+func (in *RequestURIPathParameters) DeepCopy() *RequestURIPathParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIPathParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIQueriesInitParameters) DeepCopyInto(out *RequestURIQueriesInitParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]RequestURIQueriesValueInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIQueriesInitParameters.
+func (in *RequestURIQueriesInitParameters) DeepCopy() *RequestURIQueriesInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIQueriesInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIQueriesObservation) DeepCopyInto(out *RequestURIQueriesObservation) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]RequestURIQueriesValueObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIQueriesObservation.
+func (in *RequestURIQueriesObservation) DeepCopy() *RequestURIQueriesObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIQueriesObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIQueriesParameters) DeepCopyInto(out *RequestURIQueriesParameters) {
+	*out = *in
+	if in.Key != nil {
+		in, out := &in.Key, &out.Key
+		*out = new(string)
+		**out = **in
+	}
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = make([]RequestURIQueriesValueParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIQueriesParameters.
+func (in *RequestURIQueriesParameters) DeepCopy() *RequestURIQueriesParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIQueriesParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIQueriesValueInitParameters) DeepCopyInto(out *RequestURIQueriesValueInitParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIQueriesValueInitParameters.
+func (in *RequestURIQueriesValueInitParameters) DeepCopy() *RequestURIQueriesValueInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIQueriesValueInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIQueriesValueObservation) DeepCopyInto(out *RequestURIQueriesValueObservation) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIQueriesValueObservation.
+func (in *RequestURIQueriesValueObservation) DeepCopy() *RequestURIQueriesValueObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIQueriesValueObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestURIQueriesValueParameters) DeepCopyInto(out *RequestURIQueriesValueParameters) {
+	*out = *in
+	if in.ExactMatch != nil {
+		in, out := &in.ExactMatch, &out.ExactMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.ExactNotMatch != nil {
+		in, out := &in.ExactNotMatch, &out.ExactNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexMatch != nil {
+		in, out := &in.PireRegexMatch, &out.PireRegexMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PireRegexNotMatch != nil {
+		in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixMatch != nil {
+		in, out := &in.PrefixMatch, &out.PrefixMatch
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrefixNotMatch != nil {
+		in, out := &in.PrefixNotMatch, &out.PrefixNotMatch
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIQueriesValueParameters.
+func (in *RequestURIQueriesValueParameters) DeepCopy() *RequestURIQueriesValueParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestURIQueriesValueParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleConditionInitParameters) DeepCopyInto(out *RuleConditionInitParameters) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Condition != nil {
+		in, out := &in.Condition, &out.Condition
+		*out = make([]ConditionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleConditionInitParameters.
+func (in *RuleConditionInitParameters) DeepCopy() *RuleConditionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleConditionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleConditionObservation) DeepCopyInto(out *RuleConditionObservation) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Condition != nil {
+		in, out := &in.Condition, &out.Condition
+		*out = make([]ConditionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleConditionObservation.
+func (in *RuleConditionObservation) DeepCopy() *RuleConditionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleConditionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleConditionParameters) DeepCopyInto(out *RuleConditionParameters) {
+	*out = *in
+	if in.Action != nil {
+		in, out := &in.Action, &out.Action
+		*out = new(string)
+		**out = **in
+	}
+	if in.Condition != nil {
+		in, out := &in.Condition, &out.Condition
+		*out = make([]ConditionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleConditionParameters.
+func (in *RuleConditionParameters) DeepCopy() *RuleConditionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleConditionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfile.
+func (in *SecurityProfile) DeepCopy() *SecurityProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecurityProfile) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfileInitParameters) DeepCopyInto(out *SecurityProfileInitParameters) {
+	*out = *in
+	if in.AdvancedRateLimiterProfileID != nil {
+		in, out := &in.AdvancedRateLimiterProfileID, &out.AdvancedRateLimiterProfileID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CaptchaID != nil {
+		in, out := &in.CaptchaID, &out.CaptchaID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultAction != nil {
+		in, out := &in.DefaultAction, &out.DefaultAction
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityRule != nil {
+		in, out := &in.SecurityRule, &out.SecurityRule
+		*out = make([]SecurityRuleInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileInitParameters.
+func (in *SecurityProfileInitParameters) DeepCopy() *SecurityProfileInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfileInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
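+// Illustrative sketch (not part of the generated code): DeepCopyObject above
+// is what makes SecurityProfile satisfy k8s.io/apimachinery/pkg/runtime.Object,
+// the interface the Kubernetes API machinery requires of any served or cached
+// object type. Assuming a populated *SecurityProfile named profile:
+//
+//	var obj runtime.Object = profile.DeepCopyObject()
+//	cp, ok := obj.(*SecurityProfile) // ok == true; cp is fully independent of profile
+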
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfileList) DeepCopyInto(out *SecurityProfileList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]SecurityProfile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileList.
+func (in *SecurityProfileList) DeepCopy() *SecurityProfileList {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfileList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecurityProfileList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfileObservation) DeepCopyInto(out *SecurityProfileObservation) {
+	*out = *in
+	if in.AdvancedRateLimiterProfileID != nil {
+		in, out := &in.AdvancedRateLimiterProfileID, &out.AdvancedRateLimiterProfileID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CaptchaID != nil {
+		in, out := &in.CaptchaID, &out.CaptchaID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultAction != nil {
+		in, out := &in.DefaultAction, &out.DefaultAction
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityRule != nil {
+		in, out := &in.SecurityRule, &out.SecurityRule
+		*out = make([]SecurityRuleObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileObservation.
+func (in *SecurityProfileObservation) DeepCopy() *SecurityProfileObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfileObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfileParameters) DeepCopyInto(out *SecurityProfileParameters) {
+	*out = *in
+	if in.AdvancedRateLimiterProfileID != nil {
+		in, out := &in.AdvancedRateLimiterProfileID, &out.AdvancedRateLimiterProfileID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CaptchaID != nil {
+		in, out := &in.CaptchaID, &out.CaptchaID
+		*out = new(string)
+		**out = **in
+	}
+	if in.CloudID != nil {
+		in, out := &in.CloudID, &out.CloudID
+		*out = new(string)
+		**out = **in
+	}
+	if in.DefaultAction != nil {
+		in, out := &in.DefaultAction, &out.DefaultAction
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityRule != nil {
+		in, out := &in.SecurityRule, &out.SecurityRule
+		*out = make([]SecurityRuleParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileParameters.
+func (in *SecurityProfileParameters) DeepCopy() *SecurityProfileParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfileParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfileSpec) DeepCopyInto(out *SecurityProfileSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileSpec.
+func (in *SecurityProfileSpec) DeepCopy() *SecurityProfileSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfileSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfileStatus) DeepCopyInto(out *SecurityProfileStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileStatus.
+func (in *SecurityProfileStatus) DeepCopy() *SecurityProfileStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfileStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
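+// Illustrative sketch (not part of the generated code): Labels on
+// SecurityProfileParameters above is a map[string]*string, so the generated
+// copy allocates a fresh map and a fresh *string per non-nil value while
+// preserving nil values. Assuming placeholder label values:
+//
+//	owner := "team-a"
+//	in := SecurityProfileParameters{Labels: map[string]*string{"owner": &owner, "pending": nil}}
+//	out := *in.DeepCopy()
+//	*out.Labels["owner"] = "team-b" // *in.Labels["owner"] is still "team-a"
+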
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityRuleInitParameters) DeepCopyInto(out *SecurityRuleInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.RuleCondition != nil {
+		in, out := &in.RuleCondition, &out.RuleCondition
+		*out = make([]RuleConditionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SmartProtection != nil {
+		in, out := &in.SmartProtection, &out.SmartProtection
+		*out = make([]SmartProtectionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Waf != nil {
+		in, out := &in.Waf, &out.Waf
+		*out = make([]WafInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityRuleInitParameters.
+func (in *SecurityRuleInitParameters) DeepCopy() *SecurityRuleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityRuleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityRuleObservation) DeepCopyInto(out *SecurityRuleObservation) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.RuleCondition != nil {
+		in, out := &in.RuleCondition, &out.RuleCondition
+		*out = make([]RuleConditionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SmartProtection != nil {
+		in, out := &in.SmartProtection, &out.SmartProtection
+		*out = make([]SmartProtectionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Waf != nil {
+		in, out := &in.Waf, &out.Waf
+		*out = make([]WafObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityRuleObservation.
+func (in *SecurityRuleObservation) DeepCopy() *SecurityRuleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityRuleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityRuleParameters) DeepCopyInto(out *SecurityRuleParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(float64)
+		**out = **in
+	}
+	if in.RuleCondition != nil {
+		in, out := &in.RuleCondition, &out.RuleCondition
+		*out = make([]RuleConditionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SmartProtection != nil {
+		in, out := &in.SmartProtection, &out.SmartProtection
+		*out = make([]SmartProtectionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Waf != nil {
+		in, out := &in.Waf, &out.Waf
+		*out = make([]WafParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityRuleParameters.
+func (in *SecurityRuleParameters) DeepCopy() *SecurityRuleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityRuleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SmartProtectionConditionInitParameters) DeepCopyInto(out *SmartProtectionConditionInitParameters) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]ConditionAuthorityInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTPMethod != nil {
+		in, out := &in.HTTPMethod, &out.HTTPMethod
+		*out = make([]ConditionHTTPMethodInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]ConditionHeadersInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RequestURI != nil {
+		in, out := &in.RequestURI, &out.RequestURI
+		*out = make([]ConditionRequestURIInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]ConditionSourceIPInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartProtectionConditionInitParameters.
+func (in *SmartProtectionConditionInitParameters) DeepCopy() *SmartProtectionConditionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SmartProtectionConditionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SmartProtectionConditionObservation) DeepCopyInto(out *SmartProtectionConditionObservation) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]ConditionAuthorityObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTPMethod != nil {
+		in, out := &in.HTTPMethod, &out.HTTPMethod
+		*out = make([]ConditionHTTPMethodObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]ConditionHeadersObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RequestURI != nil {
+		in, out := &in.RequestURI, &out.RequestURI
+		*out = make([]ConditionRequestURIObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]ConditionSourceIPObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartProtectionConditionObservation.
+func (in *SmartProtectionConditionObservation) DeepCopy() *SmartProtectionConditionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SmartProtectionConditionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SmartProtectionConditionParameters) DeepCopyInto(out *SmartProtectionConditionParameters) {
+	*out = *in
+	if in.Authority != nil {
+		in, out := &in.Authority, &out.Authority
+		*out = make([]ConditionAuthorityParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HTTPMethod != nil {
+		in, out := &in.HTTPMethod, &out.HTTPMethod
+		*out = make([]ConditionHTTPMethodParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Headers != nil {
+		in, out := &in.Headers, &out.Headers
+		*out = make([]ConditionHeadersParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RequestURI != nil {
+		in, out := &in.RequestURI, &out.RequestURI
+		*out = make([]ConditionRequestURIParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SourceIP != nil {
+		in, out := &in.SourceIP, &out.SourceIP
+		*out = make([]ConditionSourceIPParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartProtectionConditionParameters.
+func (in *SmartProtectionConditionParameters) DeepCopy() *SmartProtectionConditionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SmartProtectionConditionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SmartProtectionInitParameters) DeepCopyInto(out *SmartProtectionInitParameters) {
+	*out = *in
+	if in.Condition != nil {
+		in, out := &in.Condition, &out.Condition
+		*out = make([]SmartProtectionConditionInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartProtectionInitParameters.
+func (in *SmartProtectionInitParameters) DeepCopy() *SmartProtectionInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SmartProtectionInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SmartProtectionObservation) DeepCopyInto(out *SmartProtectionObservation) {
+	*out = *in
+	if in.Condition != nil {
+		in, out := &in.Condition, &out.Condition
+		*out = make([]SmartProtectionConditionObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartProtectionObservation.
+func (in *SmartProtectionObservation) DeepCopy() *SmartProtectionObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SmartProtectionObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SmartProtectionParameters) DeepCopyInto(out *SmartProtectionParameters) {
+	*out = *in
+	if in.Condition != nil {
+		in, out := &in.Condition, &out.Condition
+		*out = make([]SmartProtectionConditionParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartProtectionParameters.
+func (in *SmartProtectionParameters) DeepCopy() *SmartProtectionParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SmartProtectionParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPGeoIPMatchInitParameters) DeepCopyInto(out *SourceIPGeoIPMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPGeoIPMatchInitParameters.
+func (in *SourceIPGeoIPMatchInitParameters) DeepCopy() *SourceIPGeoIPMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPGeoIPMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
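+// Illustrative sketch (not part of the generated code): the nested
+// `in, out := &in.Locations, &out.Locations` shadowing above re-points both
+// names at the current field, so the same copy template works at every level
+// of nesting. For a []*string field the result is a fresh backing array with
+// fresh string pointers. Assuming a placeholder location value:
+//
+//	loc := "ru"
+//	src := SourceIPGeoIPMatchInitParameters{Locations: []*string{&loc}}
+//	dst := *src.DeepCopy()
+//	*dst.Locations[0] = "kz" // src.Locations[0] (which points at loc) is unchanged
+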
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPGeoIPMatchObservation) DeepCopyInto(out *SourceIPGeoIPMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPGeoIPMatchObservation.
+func (in *SourceIPGeoIPMatchObservation) DeepCopy() *SourceIPGeoIPMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPGeoIPMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPGeoIPMatchParameters) DeepCopyInto(out *SourceIPGeoIPMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPGeoIPMatchParameters.
+func (in *SourceIPGeoIPMatchParameters) DeepCopy() *SourceIPGeoIPMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPGeoIPMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPGeoIPNotMatchInitParameters) DeepCopyInto(out *SourceIPGeoIPNotMatchInitParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPGeoIPNotMatchInitParameters.
+func (in *SourceIPGeoIPNotMatchInitParameters) DeepCopy() *SourceIPGeoIPNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPGeoIPNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPGeoIPNotMatchObservation) DeepCopyInto(out *SourceIPGeoIPNotMatchObservation) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPGeoIPNotMatchObservation.
+func (in *SourceIPGeoIPNotMatchObservation) DeepCopy() *SourceIPGeoIPNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPGeoIPNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPGeoIPNotMatchParameters) DeepCopyInto(out *SourceIPGeoIPNotMatchParameters) {
+	*out = *in
+	if in.Locations != nil {
+		in, out := &in.Locations, &out.Locations
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPGeoIPNotMatchParameters.
+func (in *SourceIPGeoIPNotMatchParameters) DeepCopy() *SourceIPGeoIPNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPGeoIPNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPIPRangesMatchInitParameters) DeepCopyInto(out *SourceIPIPRangesMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPIPRangesMatchInitParameters.
+func (in *SourceIPIPRangesMatchInitParameters) DeepCopy() *SourceIPIPRangesMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPIPRangesMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPIPRangesMatchObservation) DeepCopyInto(out *SourceIPIPRangesMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPIPRangesMatchObservation.
+func (in *SourceIPIPRangesMatchObservation) DeepCopy() *SourceIPIPRangesMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPIPRangesMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPIPRangesMatchParameters) DeepCopyInto(out *SourceIPIPRangesMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPIPRangesMatchParameters.
+func (in *SourceIPIPRangesMatchParameters) DeepCopy() *SourceIPIPRangesMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPIPRangesMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPIPRangesNotMatchInitParameters) DeepCopyInto(out *SourceIPIPRangesNotMatchInitParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPIPRangesNotMatchInitParameters.
+func (in *SourceIPIPRangesNotMatchInitParameters) DeepCopy() *SourceIPIPRangesNotMatchInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPIPRangesNotMatchInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPIPRangesNotMatchObservation) DeepCopyInto(out *SourceIPIPRangesNotMatchObservation) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPIPRangesNotMatchObservation.
+func (in *SourceIPIPRangesNotMatchObservation) DeepCopy() *SourceIPIPRangesNotMatchObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPIPRangesNotMatchObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPIPRangesNotMatchParameters) DeepCopyInto(out *SourceIPIPRangesNotMatchParameters) {
+	*out = *in
+	if in.IPRanges != nil {
+		in, out := &in.IPRanges, &out.IPRanges
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPIPRangesNotMatchParameters.
+func (in *SourceIPIPRangesNotMatchParameters) DeepCopy() *SourceIPIPRangesNotMatchParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPIPRangesNotMatchParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPInitParameters) DeepCopyInto(out *SourceIPInitParameters) {
+	*out = *in
+	if in.GeoIPMatch != nil {
+		in, out := &in.GeoIPMatch, &out.GeoIPMatch
+		*out = make([]GeoIPMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GeoIPNotMatch != nil {
+		in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch
+		*out = make([]GeoIPNotMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesMatch != nil {
+		in, out := &in.IPRangesMatch, &out.IPRangesMatch
+		*out = make([]IPRangesMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesNotMatch != nil {
+		in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch
+		*out = make([]IPRangesNotMatchInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPInitParameters.
+func (in *SourceIPInitParameters) DeepCopy() *SourceIPInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPObservation) DeepCopyInto(out *SourceIPObservation) {
+	*out = *in
+	if in.GeoIPMatch != nil {
+		in, out := &in.GeoIPMatch, &out.GeoIPMatch
+		*out = make([]GeoIPMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GeoIPNotMatch != nil {
+		in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch
+		*out = make([]GeoIPNotMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesMatch != nil {
+		in, out := &in.IPRangesMatch, &out.IPRangesMatch
+		*out = make([]IPRangesMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesNotMatch != nil {
+		in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch
+		*out = make([]IPRangesNotMatchObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPObservation.
+func (in *SourceIPObservation) DeepCopy() *SourceIPObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceIPParameters) DeepCopyInto(out *SourceIPParameters) {
+	*out = *in
+	if in.GeoIPMatch != nil {
+		in, out := &in.GeoIPMatch, &out.GeoIPMatch
+		*out = make([]GeoIPMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.GeoIPNotMatch != nil {
+		in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch
+		*out = make([]GeoIPNotMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesMatch != nil {
+		in, out := &in.IPRangesMatch, &out.IPRangesMatch
+		*out = make([]IPRangesMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.IPRangesNotMatch != nil {
+		in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch
+		*out = make([]IPRangesNotMatchParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPParameters.
+func (in *SourceIPParameters) DeepCopy() *SourceIPParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceIPParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueInitParameters) DeepCopyInto(out *ValueInitParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueInitParameters. +func (in *ValueInitParameters) DeepCopy() *ValueInitParameters { + if in == nil { + return nil + } + out := new(ValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueObservation) DeepCopyInto(out *ValueObservation) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueObservation. +func (in *ValueObservation) DeepCopy() *ValueObservation { + if in == nil { + return nil + } + out := new(ValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueParameters) DeepCopyInto(out *ValueParameters) { + *out = *in + if in.ExactMatch != nil { + in, out := &in.ExactMatch, &out.ExactMatch + *out = new(string) + **out = **in + } + if in.ExactNotMatch != nil { + in, out := &in.ExactNotMatch, &out.ExactNotMatch + *out = new(string) + **out = **in + } + if in.PireRegexMatch != nil { + in, out := &in.PireRegexMatch, &out.PireRegexMatch + *out = new(string) + **out = **in + } + if in.PireRegexNotMatch != nil { + in, out := &in.PireRegexNotMatch, &out.PireRegexNotMatch + *out = new(string) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = new(string) + **out = **in + } + if in.PrefixNotMatch != nil { + in, out := &in.PrefixNotMatch, &out.PrefixNotMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueParameters. 
+func (in *ValueParameters) DeepCopy() *ValueParameters { + if in == nil { + return nil + } + out := new(ValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionAuthorityInitParameters) DeepCopyInto(out *WafConditionAuthorityInitParameters) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]ConditionAuthorityAuthoritiesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionAuthorityInitParameters. +func (in *WafConditionAuthorityInitParameters) DeepCopy() *WafConditionAuthorityInitParameters { + if in == nil { + return nil + } + out := new(WafConditionAuthorityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionAuthorityObservation) DeepCopyInto(out *WafConditionAuthorityObservation) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]ConditionAuthorityAuthoritiesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionAuthorityObservation. +func (in *WafConditionAuthorityObservation) DeepCopy() *WafConditionAuthorityObservation { + if in == nil { + return nil + } + out := new(WafConditionAuthorityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionAuthorityParameters) DeepCopyInto(out *WafConditionAuthorityParameters) { + *out = *in + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]ConditionAuthorityAuthoritiesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionAuthorityParameters. +func (in *WafConditionAuthorityParameters) DeepCopy() *WafConditionAuthorityParameters { + if in == nil { + return nil + } + out := new(WafConditionAuthorityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionHTTPMethodInitParameters) DeepCopyInto(out *WafConditionHTTPMethodInitParameters) { + *out = *in + if in.HTTPMethods != nil { + in, out := &in.HTTPMethods, &out.HTTPMethods + *out = make([]ConditionHTTPMethodHTTPMethodsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionHTTPMethodInitParameters. +func (in *WafConditionHTTPMethodInitParameters) DeepCopy() *WafConditionHTTPMethodInitParameters { + if in == nil { + return nil + } + out := new(WafConditionHTTPMethodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WafConditionHTTPMethodObservation) DeepCopyInto(out *WafConditionHTTPMethodObservation) { + *out = *in + if in.HTTPMethods != nil { + in, out := &in.HTTPMethods, &out.HTTPMethods + *out = make([]ConditionHTTPMethodHTTPMethodsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionHTTPMethodObservation. +func (in *WafConditionHTTPMethodObservation) DeepCopy() *WafConditionHTTPMethodObservation { + if in == nil { + return nil + } + out := new(WafConditionHTTPMethodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionHTTPMethodParameters) DeepCopyInto(out *WafConditionHTTPMethodParameters) { + *out = *in + if in.HTTPMethods != nil { + in, out := &in.HTTPMethods, &out.HTTPMethods + *out = make([]ConditionHTTPMethodHTTPMethodsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionHTTPMethodParameters. +func (in *WafConditionHTTPMethodParameters) DeepCopy() *WafConditionHTTPMethodParameters { + if in == nil { + return nil + } + out := new(WafConditionHTTPMethodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionHeadersInitParameters) DeepCopyInto(out *WafConditionHeadersInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]ConditionHeadersValueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionHeadersInitParameters. +func (in *WafConditionHeadersInitParameters) DeepCopy() *WafConditionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WafConditionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionHeadersObservation) DeepCopyInto(out *WafConditionHeadersObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]ConditionHeadersValueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionHeadersObservation. +func (in *WafConditionHeadersObservation) DeepCopy() *WafConditionHeadersObservation { + if in == nil { + return nil + } + out := new(WafConditionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WafConditionHeadersParameters) DeepCopyInto(out *WafConditionHeadersParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]ConditionHeadersValueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionHeadersParameters. +func (in *WafConditionHeadersParameters) DeepCopy() *WafConditionHeadersParameters { + if in == nil { + return nil + } + out := new(WafConditionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionInitParameters) DeepCopyInto(out *WafConditionInitParameters) { + *out = *in + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = make([]WafConditionAuthorityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]WafConditionHTTPMethodInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WafConditionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]WafConditionRequestURIInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = make([]WafConditionSourceIPInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionInitParameters. +func (in *WafConditionInitParameters) DeepCopy() *WafConditionInitParameters { + if in == nil { + return nil + } + out := new(WafConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionObservation) DeepCopyInto(out *WafConditionObservation) { + *out = *in + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = make([]WafConditionAuthorityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]WafConditionHTTPMethodObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WafConditionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]WafConditionRequestURIObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = make([]WafConditionSourceIPObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionObservation. 
+func (in *WafConditionObservation) DeepCopy() *WafConditionObservation { + if in == nil { + return nil + } + out := new(WafConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionParameters) DeepCopyInto(out *WafConditionParameters) { + *out = *in + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = make([]WafConditionAuthorityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = make([]WafConditionHTTPMethodParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WafConditionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestURI != nil { + in, out := &in.RequestURI, &out.RequestURI + *out = make([]WafConditionRequestURIParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = make([]WafConditionSourceIPParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionParameters. +func (in *WafConditionParameters) DeepCopy() *WafConditionParameters { + if in == nil { + return nil + } + out := new(WafConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionRequestURIInitParameters) DeepCopyInto(out *WafConditionRequestURIInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]ConditionRequestURIPathInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]ConditionRequestURIQueriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionRequestURIInitParameters. +func (in *WafConditionRequestURIInitParameters) DeepCopy() *WafConditionRequestURIInitParameters { + if in == nil { + return nil + } + out := new(WafConditionRequestURIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionRequestURIObservation) DeepCopyInto(out *WafConditionRequestURIObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]ConditionRequestURIPathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]ConditionRequestURIQueriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionRequestURIObservation. 
+func (in *WafConditionRequestURIObservation) DeepCopy() *WafConditionRequestURIObservation { + if in == nil { + return nil + } + out := new(WafConditionRequestURIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionRequestURIParameters) DeepCopyInto(out *WafConditionRequestURIParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = make([]ConditionRequestURIPathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]ConditionRequestURIQueriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionRequestURIParameters. +func (in *WafConditionRequestURIParameters) DeepCopy() *WafConditionRequestURIParameters { + if in == nil { + return nil + } + out := new(WafConditionRequestURIParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionSourceIPInitParameters) DeepCopyInto(out *WafConditionSourceIPInitParameters) { + *out = *in + if in.GeoIPMatch != nil { + in, out := &in.GeoIPMatch, &out.GeoIPMatch + *out = make([]ConditionSourceIPGeoIPMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoIPNotMatch != nil { + in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch + *out = make([]ConditionSourceIPGeoIPNotMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesMatch != nil { + in, out := &in.IPRangesMatch, &out.IPRangesMatch + *out = make([]ConditionSourceIPIPRangesMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesNotMatch != nil { + in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch + *out = make([]ConditionSourceIPIPRangesNotMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionSourceIPInitParameters. +func (in *WafConditionSourceIPInitParameters) DeepCopy() *WafConditionSourceIPInitParameters { + if in == nil { + return nil + } + out := new(WafConditionSourceIPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WafConditionSourceIPObservation) DeepCopyInto(out *WafConditionSourceIPObservation) { + *out = *in + if in.GeoIPMatch != nil { + in, out := &in.GeoIPMatch, &out.GeoIPMatch + *out = make([]ConditionSourceIPGeoIPMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoIPNotMatch != nil { + in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch + *out = make([]ConditionSourceIPGeoIPNotMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesMatch != nil { + in, out := &in.IPRangesMatch, &out.IPRangesMatch + *out = make([]ConditionSourceIPIPRangesMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesNotMatch != nil { + in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch + *out = make([]ConditionSourceIPIPRangesNotMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionSourceIPObservation. +func (in *WafConditionSourceIPObservation) DeepCopy() *WafConditionSourceIPObservation { + if in == nil { + return nil + } + out := new(WafConditionSourceIPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConditionSourceIPParameters) DeepCopyInto(out *WafConditionSourceIPParameters) { + *out = *in + if in.GeoIPMatch != nil { + in, out := &in.GeoIPMatch, &out.GeoIPMatch + *out = make([]ConditionSourceIPGeoIPMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoIPNotMatch != nil { + in, out := &in.GeoIPNotMatch, &out.GeoIPNotMatch + *out = make([]ConditionSourceIPGeoIPNotMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesMatch != nil { + in, out := &in.IPRangesMatch, &out.IPRangesMatch + *out = make([]ConditionSourceIPIPRangesMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangesNotMatch != nil { + in, out := &in.IPRangesNotMatch, &out.IPRangesNotMatch + *out = make([]ConditionSourceIPIPRangesNotMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConditionSourceIPParameters. +func (in *WafConditionSourceIPParameters) DeepCopy() *WafConditionSourceIPParameters { + if in == nil { + return nil + } + out := new(WafConditionSourceIPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafInitParameters) DeepCopyInto(out *WafInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]WafConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.WafProfileID != nil { + in, out := &in.WafProfileID, &out.WafProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafInitParameters. 
+func (in *WafInitParameters) DeepCopy() *WafInitParameters { + if in == nil { + return nil + } + out := new(WafInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafObservation) DeepCopyInto(out *WafObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]WafConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.WafProfileID != nil { + in, out := &in.WafProfileID, &out.WafProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafObservation. +func (in *WafObservation) DeepCopy() *WafObservation { + if in == nil { + return nil + } + out := new(WafObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafParameters) DeepCopyInto(out *WafParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]WafConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.WafProfileID != nil { + in, out := &in.WafProfileID, &out.WafProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafParameters. +func (in *WafParameters) DeepCopy() *WafParameters { + if in == nil { + return nil + } + out := new(WafParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/sws/v1alpha1/zz_generated.resolvers.go b/apis/sws/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..2e38f98 --- /dev/null +++ b/apis/sws/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this SecurityProfile. 
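Editor's note: the DeepCopyInto/DeepCopy pairs above follow the standard controller-gen deepcopy pattern that Kubernetes API machinery requires of every API type. To make the copying concrete, here is a minimal, self-contained sketch of the same pointer-slice pattern used by the SourceIPIPRangesNotMatch* types; the type name is hypothetical and the code is illustrative only, not part of the provider.

```go
package main

import "fmt"

// IPRangesMatcher mirrors the shape of the generated structs: a slice of
// string pointers, so both the slice and each element must be copied.
type IPRangesMatcher struct {
	IPRanges []*string
}

// DeepCopyInto copies the receiver into out; in must be non-nil.
func (in *IPRangesMatcher) DeepCopyInto(out *IPRangesMatcher) {
	*out = *in // copies the slice header, still aliasing the backing array
	if in.IPRanges != nil {
		out.IPRanges = make([]*string, len(in.IPRanges))
		for i, p := range in.IPRanges {
			if p != nil {
				v := *p
				out.IPRanges[i] = &v // fresh allocation, no shared pointer
			}
		}
	}
}

func main() {
	a := "10.0.0.0/8"
	src := &IPRangesMatcher{IPRanges: []*string{&a}}
	dst := new(IPRangesMatcher)
	src.DeepCopyInto(dst)
	*src.IPRanges[0] = "changed"
	fmt.Println(*dst.IPRanges[0]) // prints "10.0.0.0/8": the copy is independent
}
```

The element-wise `new(string)`/fresh allocation is what guarantees the copy shares no pointers with the source, which is what the Kubernetes object contract expects.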
diff --git a/apis/sws/v1alpha1/zz_generated.resolvers.go b/apis/sws/v1alpha1/zz_generated.resolvers.go
new file mode 100644
index 0000000..2e38f98
--- /dev/null
+++ b/apis/sws/v1alpha1/zz_generated.resolvers.go
@@ -0,0 +1,53 @@
+// Code generated by angryjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	errors "github.com/pkg/errors"
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this SecurityProfile.
+func (mg *SecurityProfile) ResolveReferences(ctx context.Context, c client.Reader) error {
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.ForProvider.FolderIDRef,
+		Selector:     mg.Spec.ForProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.FolderID")
+	}
+	mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference
+
+	rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+		CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID),
+		Extract:      reference.ExternalName(),
+		Reference:    mg.Spec.InitProvider.FolderIDRef,
+		Selector:     mg.Spec.InitProvider.FolderIDSelector,
+		To: reference.To{
+			List:    &v1alpha1.FolderList{},
+			Managed: &v1alpha1.Folder{},
+		},
+	})
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.FolderID")
+	}
+	mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference
+
+	return nil
+}
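Editor's note: ResolveReferences fills spec.forProvider.folderId (and its spec.initProvider counterpart) from a referenced resourcemanager Folder object, either by direct reference or by label selector. The nil-safe conversions it relies on behave like the following sketch; this is an illustrative re-implementation of the helper semantics, not the crossplane-runtime source.

```go
package main

import "fmt"

// fromPtrValue mirrors reference.FromPtrValue: a nil pointer reads as "".
func fromPtrValue(v *string) string {
	if v == nil {
		return ""
	}
	return *v
}

// toPtrValue mirrors reference.ToPtrValue: an empty resolved value stays
// unset (nil) rather than being written back as an empty string.
func toPtrValue(v string) *string {
	if v == "" {
		return nil
	}
	return &v
}

func main() {
	var folderID *string
	fmt.Printf("%q\n", fromPtrValue(folderID)) // "" – safe on nil input
	fmt.Println(toPtrValue("") == nil)         // true – nothing resolved, nothing set
}
```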
diff --git a/apis/sws/v1alpha1/zz_groupversion_info.go b/apis/sws/v1alpha1/zz_groupversion_info.go
index fe0a5f0..46ca0cd 100755
--- a/apis/sws/v1alpha1/zz_groupversion_info.go
+++ b/apis/sws/v1alpha1/zz_groupversion_info.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 // +kubebuilder:object:generate=true
diff --git a/apis/sws/v1alpha1/zz_securityprofile_terraformed.go b/apis/sws/v1alpha1/zz_securityprofile_terraformed.go
index 0dbbde3..2396951 100755
--- a/apis/sws/v1alpha1/zz_securityprofile_terraformed.go
+++ b/apis/sws/v1alpha1/zz_securityprofile_terraformed.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package v1alpha1
@@ -10,120 +8,118 @@ import (
 	"github.com/crossplane/upjet/pkg/resource"
 	"github.com/crossplane/upjet/pkg/resource/json"
-
 )
 
 // GetTerraformResourceType returns Terraform resource type for this SecurityProfile
 func (mg *SecurityProfile) GetTerraformResourceType() string {
- return "yandex_sws_security_profile"
+	return "yandex_sws_security_profile"
 }
 
 // GetConnectionDetailsMapping for this SecurityProfile
 func (tr *SecurityProfile) GetConnectionDetailsMapping() map[string]string {
- return nil
+	return nil
 }
 
 // GetObservation of this SecurityProfile
 func (tr *SecurityProfile) GetObservation() (map[string]any, error) {
- o, err := json.TFParser.Marshal(tr.Status.AtProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(o, &base)
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
 }
 
 // SetObservation for this SecurityProfile
 func (tr *SecurityProfile) SetObservation(obs map[string]any) error {
- p, err := json.TFParser.Marshal(obs)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
 }
 
 // GetID returns ID of underlying Terraform resource of this SecurityProfile
 func (tr *SecurityProfile) GetID() string {
- if tr.Status.AtProvider.ID == nil {
- return ""
- }
- return *tr.Status.AtProvider.ID
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
 }
 
 // GetParameters of this SecurityProfile
 func (tr *SecurityProfile) GetParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // SetParameters for this SecurityProfile
 func (tr *SecurityProfile) SetParameters(params map[string]any) error {
- p, err := json.TFParser.Marshal(params)
- if err != nil {
- return err
- }
- return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
 }
 
 // GetInitParameters of this SecurityProfile
 func (tr *SecurityProfile) GetInitParameters() (map[string]any, error) {
- p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
- if err != nil {
- return nil, err
- }
- base := map[string]any{}
- return base, json.TFParser.Unmarshal(p, &base)
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
 }
 
 // GetMergedParameters of this SecurityProfile
 func (tr *SecurityProfile) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
- params, err := tr.GetParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
- }
- if !shouldMergeInitProvider {
- return params, nil
- }
-
- initParams, err := tr.GetInitParameters()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
- }
-
- // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
- // slices from the initProvider to forProvider. As it also sets
- // overwrite to true, we need to set it back to false, we don't
- // want to overwrite the forProvider fields with the initProvider
- // fields.
- err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
- c.Overwrite = false
- })
- if err != nil {
- return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
- }
-
- return params, nil
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
 }
 
 // LateInitialize this SecurityProfile using its observed tfState.
 // returns True if there are any spec changes for the resource.
 func (tr *SecurityProfile) LateInitialize(attrs []byte) (bool, error) {
- params := &SecurityProfileParameters{}
- if err := json.TFParser.Unmarshal(attrs, params); err != nil {
- return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
- }
- opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
-
-
- li := resource.NewGenericLateInitializer(opts...)
- return li.LateInitialize(&tr.Spec.ForProvider, params)
+	params := &SecurityProfileParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
 }
 
 // GetTerraformSchemaVersion returns the associated Terraform schema version
 func (tr *SecurityProfile) GetTerraformSchemaVersion() int {
- return 1
+	return 1
 }
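Editor's note: GetMergedParameters overlays spec.initProvider onto spec.forProvider with forProvider taking precedence, exactly as the Note(lsviben) comment above describes. A minimal runnable sketch of those merge semantics follows, assuming the mergo module the generated code imports (github.com/imdario/mergo; newer releases are published as dario.cat/mergo).

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	params := map[string]any{"name": "profile-a"}                  // forProvider
	initParams := map[string]any{"name": "ignored", "mode": "API"} // initProvider

	// WithSliceDeepCopy also flips Overwrite on, so it is forced back off:
	// keys already set in forProvider must not be overwritten by initProvider.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["name"], params["mode"]) // profile-a API
}
```

The result keeps "name" from forProvider and only fills in the "mode" key that forProvider left unset, which is the merge behavior the controller relies on when both spec sections are populated.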
package v1alpha1 @@ -9,2147 +7,1736 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AuthoritiesInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type AuthoritiesObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type AuthoritiesParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // 
+kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type AuthorityAuthoritiesInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type AuthorityAuthoritiesObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type AuthorityAuthoritiesParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional 
-PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type AuthorityInitParameters struct { - - -Authorities []AuthoritiesInitParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` + Authorities []AuthoritiesInitParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type AuthorityObservation struct { - - -Authorities []AuthoritiesObservation `json:"authorities,omitempty" tf:"authorities,omitempty"` + Authorities []AuthoritiesObservation `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type AuthorityParameters struct { - -// +kubebuilder:validation:Optional -Authorities []AuthoritiesParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` + // +kubebuilder:validation:Optional + Authorities []AuthoritiesParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type ConditionAuthorityAuthoritiesInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionAuthorityAuthoritiesObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string 
`json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionAuthorityAuthoritiesParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionAuthorityInitParameters struct { - - -Authorities []AuthorityAuthoritiesInitParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` + Authorities []AuthorityAuthoritiesInitParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type ConditionAuthorityObservation struct { - - -Authorities []AuthorityAuthoritiesObservation `json:"authorities,omitempty" tf:"authorities,omitempty"` + Authorities []AuthorityAuthoritiesObservation `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type ConditionAuthorityParameters struct { - -// +kubebuilder:validation:Optional -Authorities []AuthorityAuthoritiesParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` + // +kubebuilder:validation:Optional + Authorities []AuthorityAuthoritiesParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type ConditionHTTPMethodHTTPMethodsInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string 
`json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionHTTPMethodHTTPMethodsObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionHTTPMethodHTTPMethodsParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionHTTPMethodInitParameters struct { - - -HTTPMethods []HTTPMethodHTTPMethodsInitParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + HTTPMethods []HTTPMethodHTTPMethodsInitParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type ConditionHTTPMethodObservation struct { - - -HTTPMethods []HTTPMethodHTTPMethodsObservation `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + HTTPMethods []HTTPMethodHTTPMethodsObservation `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type ConditionHTTPMethodParameters struct { - -// +kubebuilder:validation:Optional -HTTPMethods []HTTPMethodHTTPMethodsParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + // +kubebuilder:validation:Optional + HTTPMethods 
[]HTTPMethodHTTPMethodsParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type ConditionHeadersInitParameters struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []HeadersValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + Value []HeadersValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type ConditionHeadersObservation struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []HeadersValueObservation `json:"value,omitempty" tf:"value,omitempty"` + Value []HeadersValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type ConditionHeadersParameters struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// +kubebuilder:validation:Optional -Value []HeadersValueParameters `json:"value" tf:"value,omitempty"` + // +kubebuilder:validation:Optional + Value []HeadersValueParameters `json:"value" tf:"value,omitempty"` } - type ConditionHeadersValueInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionHeadersValueObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch 
*string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionHeadersValueParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionInitParameters struct { + Authority []AuthorityInitParameters `json:"authority,omitempty" tf:"authority,omitempty"` + HTTPMethod []HTTPMethodInitParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -Authority []AuthorityInitParameters `json:"authority,omitempty" tf:"authority,omitempty"` - -HTTPMethod []HTTPMethodInitParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` - -Headers []HeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + Headers []HeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` -RequestURI []RequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + RequestURI []RequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -SourceIP []SourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + SourceIP []SourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type ConditionObservation struct { + Authority []AuthorityObservation `json:"authority,omitempty" tf:"authority,omitempty"` + HTTPMethod []HTTPMethodObservation `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -Authority []AuthorityObservation `json:"authority,omitempty" tf:"authority,omitempty"` - -HTTPMethod []HTTPMethodObservation `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + Headers []HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` -Headers []HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + RequestURI []RequestURIObservation 
`json:"requestUri,omitempty" tf:"request_uri,omitempty"` -RequestURI []RequestURIObservation `json:"requestUri,omitempty" tf:"request_uri,omitempty"` - -SourceIP []SourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + SourceIP []SourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type ConditionParameters struct { + // +kubebuilder:validation:Optional + Authority []AuthorityParameters `json:"authority,omitempty" tf:"authority,omitempty"` -// +kubebuilder:validation:Optional -Authority []AuthorityParameters `json:"authority,omitempty" tf:"authority,omitempty"` + // +kubebuilder:validation:Optional + HTTPMethod []HTTPMethodParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -// +kubebuilder:validation:Optional -HTTPMethod []HTTPMethodParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + // +kubebuilder:validation:Optional + Headers []HeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` -// +kubebuilder:validation:Optional -Headers []HeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + // +kubebuilder:validation:Optional + RequestURI []RequestURIParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -// +kubebuilder:validation:Optional -RequestURI []RequestURIParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` - -// +kubebuilder:validation:Optional -SourceIP []SourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // +kubebuilder:validation:Optional + SourceIP []SourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type ConditionRequestURIInitParameters struct { + Path []RequestURIPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -Path []RequestURIPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -Queries []RequestURIQueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` + Queries []RequestURIQueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type ConditionRequestURIObservation struct { + Path []RequestURIPathObservation `json:"path,omitempty" tf:"path,omitempty"` - -Path []RequestURIPathObservation `json:"path,omitempty" tf:"path,omitempty"` - -Queries []RequestURIQueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` + Queries []RequestURIQueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` } - type ConditionRequestURIParameters struct { + // +kubebuilder:validation:Optional + Path []RequestURIPathParameters `json:"path,omitempty" tf:"path,omitempty"` -// +kubebuilder:validation:Optional -Path []RequestURIPathParameters `json:"path,omitempty" tf:"path,omitempty"` - -// +kubebuilder:validation:Optional -Queries []RequestURIQueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // +kubebuilder:validation:Optional + Queries []RequestURIQueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type ConditionRequestURIPathInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string 
`json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionRequestURIPathObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionRequestURIPathParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionRequestURIQueriesInitParameters struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Value []ConditionRequestURIQueriesValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + Value []ConditionRequestURIQueriesValueInitParameters `json:"value,omitempty" 
tf:"value,omitempty"` } - type ConditionRequestURIQueriesObservation struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Value []ConditionRequestURIQueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"` + Value []ConditionRequestURIQueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type ConditionRequestURIQueriesParameters struct { + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` -// +kubebuilder:validation:Optional -Key *string `json:"key" tf:"key,omitempty"` - -// +kubebuilder:validation:Optional -Value []ConditionRequestURIQueriesValueParameters `json:"value" tf:"value,omitempty"` + // +kubebuilder:validation:Optional + Value []ConditionRequestURIQueriesValueParameters `json:"value" tf:"value,omitempty"` } - type ConditionRequestURIQueriesValueInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionRequestURIQueriesValueObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionRequestURIQueriesValueParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// 
+kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ConditionSourceIPGeoIPMatchInitParameters struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type ConditionSourceIPGeoIPMatchObservation struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type ConditionSourceIPGeoIPMatchParameters struct { - -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type ConditionSourceIPGeoIPNotMatchInitParameters struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type ConditionSourceIPGeoIPNotMatchObservation struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type ConditionSourceIPGeoIPNotMatchParameters struct { - -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type ConditionSourceIPIPRangesMatchInitParameters struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type ConditionSourceIPIPRangesMatchObservation struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type ConditionSourceIPIPRangesMatchParameters struct { - -// +kubebuilder:validation:Optional -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // +kubebuilder:validation:Optional + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type ConditionSourceIPIPRangesNotMatchInitParameters struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type 
ConditionSourceIPIPRangesNotMatchObservation struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type ConditionSourceIPIPRangesNotMatchParameters struct { - -// +kubebuilder:validation:Optional -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // +kubebuilder:validation:Optional + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type ConditionSourceIPInitParameters struct { + GeoIPMatch []SourceIPGeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + GeoIPNotMatch []SourceIPGeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -GeoIPMatch []SourceIPGeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -GeoIPNotMatch []SourceIPGeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` - -IPRangesMatch []SourceIPIPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + IPRangesMatch []SourceIPIPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -IPRangesNotMatch []SourceIPIPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + IPRangesNotMatch []SourceIPIPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type ConditionSourceIPObservation struct { + GeoIPMatch []SourceIPGeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + GeoIPNotMatch []SourceIPGeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -GeoIPMatch []SourceIPGeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -GeoIPNotMatch []SourceIPGeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` - -IPRangesMatch []SourceIPIPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + IPRangesMatch []SourceIPIPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -IPRangesNotMatch []SourceIPIPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + IPRangesNotMatch []SourceIPIPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type ConditionSourceIPParameters struct { + // +kubebuilder:validation:Optional + GeoIPMatch []SourceIPGeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` -// +kubebuilder:validation:Optional -GeoIPMatch []SourceIPGeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -// +kubebuilder:validation:Optional -GeoIPNotMatch []SourceIPGeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + // +kubebuilder:validation:Optional + GeoIPNotMatch []SourceIPGeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -// +kubebuilder:validation:Optional -IPRangesMatch []SourceIPIPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + // +kubebuilder:validation:Optional + IPRangesMatch []SourceIPIPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -// +kubebuilder:validation:Optional -IPRangesNotMatch []SourceIPIPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` 
+ // +kubebuilder:validation:Optional + IPRangesNotMatch []SourceIPIPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type GeoIPMatchInitParameters struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPMatchObservation struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPMatchParameters struct { - -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPNotMatchInitParameters struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPNotMatchObservation struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type GeoIPNotMatchParameters struct { - -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type HTTPMethodHTTPMethodsInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HTTPMethodHTTPMethodsObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch 
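// Editor's illustrative sketch: a source-IP condition combining a GeoIP match
// with a CIDR match. SourceIPGeoIPMatchInitParameters and
// SourceIPIPRangesMatchInitParameters are declared elsewhere in this package;
// their Locations/IPRanges fields are assumed here from the ConditionSourceIP*
// symmetry above (strPtr is the helper sketched earlier).
var exampleSourceIPCondition = ConditionSourceIPInitParameters{
    GeoIPMatch:    []SourceIPGeoIPMatchInitParameters{{Locations: []*string{strPtr("ru"), strPtr("kz")}}},
    IPRangesMatch: []SourceIPIPRangesMatchInitParameters{{IPRanges: []*string{strPtr("10.0.0.0/8")}}},
}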
*string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HTTPMethodHTTPMethodsParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HTTPMethodInitParameters struct { - - -HTTPMethods []HTTPMethodsInitParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + HTTPMethods []HTTPMethodsInitParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type HTTPMethodObservation struct { - - -HTTPMethods []HTTPMethodsObservation `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + HTTPMethods []HTTPMethodsObservation `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type HTTPMethodParameters struct { - -// +kubebuilder:validation:Optional -HTTPMethods []HTTPMethodsParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + // +kubebuilder:validation:Optional + HTTPMethods []HTTPMethodsParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type HTTPMethodsInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string 
`json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HTTPMethodsObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HTTPMethodsParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type HeadersInitParameters struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type HeadersObservation struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. 
type HeadersInitParameters struct {
    // Name of the rule. The name is unique within the security profile. 1-50 characters long.
    Name *string `json:"name,omitempty" tf:"name,omitempty"`

    Value []ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"`
}

type HeadersObservation struct {
    // Name of the rule. The name is unique within the security profile. 1-50 characters long.
    Name *string `json:"name,omitempty" tf:"name,omitempty"`

    Value []ValueObservation `json:"value,omitempty" tf:"value,omitempty"`
}

type HeadersParameters struct {
    // Name of the rule. The name is unique within the security profile. 1-50 characters long.
    // +kubebuilder:validation:Optional
    Name *string `json:"name,omitempty" tf:"name,omitempty"`

    // +kubebuilder:validation:Optional
    Value []ValueParameters `json:"value" tf:"value,omitempty"`
}

type HeadersValueInitParameters struct {
    ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
    ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
    PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
    PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
    PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
    PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
}

type HeadersValueObservation struct {
    ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
    ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
    PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
    PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
    PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
    PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
}

type HeadersValueParameters struct {
    // +kubebuilder:validation:Optional
    ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
    // +kubebuilder:validation:Optional
    ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
    // +kubebuilder:validation:Optional
    PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
    // +kubebuilder:validation:Optional
    PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
    // +kubebuilder:validation:Optional
    PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
    // +kubebuilder:validation:Optional
    PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
}

type IPRangesMatchInitParameters struct {
    IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"`
}

type IPRangesMatchObservation struct {
    IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"`
}

type IPRangesMatchParameters struct {
    // +kubebuilder:validation:Optional
    IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"`
}

type IPRangesNotMatchInitParameters struct {
    IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"`
}

type IPRangesNotMatchObservation struct {
    IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"`
}

type IPRangesNotMatchParameters struct {
    // +kubebuilder:validation:Optional
    IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"`
}

type PathInitParameters struct {
    ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
    ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
    PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
    PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
    PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
    PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
}

type PathObservation struct {
    ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
    ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
    PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
    PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
    PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
    PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
}

type PathParameters struct {
    // +kubebuilder:validation:Optional
    ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"`
    // +kubebuilder:validation:Optional
    ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"`
    // +kubebuilder:validation:Optional
    PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"`
    // +kubebuilder:validation:Optional
    PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"`
    // +kubebuilder:validation:Optional
    PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"`
    // +kubebuilder:validation:Optional
    PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"`
}

type QueriesInitParameters struct {
    Key *string `json:"key,omitempty" tf:"key,omitempty"`
    Value []QueriesValueInitParameters `json:"value,omitempty" tf:"value,omitempty"`
}

type QueriesObservation struct {
    Key *string `json:"key,omitempty" tf:"key,omitempty"`
    Value []QueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"`
}

type QueriesParameters struct {
    // +kubebuilder:validation:Optional
    Key *string `json:"key" tf:"key,omitempty"`
    // +kubebuilder:validation:Optional
    Value []QueriesValueParameters `json:"value" tf:"value,omitempty"`
}
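// Editor's illustrative sketch: a request-URI condition pairing a path prefix
// match with a query-parameter match (RequestURIInitParameters and
// QueriesValueInitParameters are declared just below; strPtr as above).
var exampleRequestURICondition = RequestURIInitParameters{
    Path: []PathInitParameters{{PrefixMatch: strPtr("/api/")}},
    Queries: []QueriesInitParameters{{
        Key:   strPtr("debug"),
        Value: []QueriesValueInitParameters{{ExactMatch: strPtr("true")}},
    }},
}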
tf:"value,omitempty"` } - type QueriesValueInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type QueriesValueObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type QueriesValueParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional 
-PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RequestURIInitParameters struct { + Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -Path []PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -Queries []QueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` + Queries []QueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type RequestURIObservation struct { + Path []PathObservation `json:"path,omitempty" tf:"path,omitempty"` - -Path []PathObservation `json:"path,omitempty" tf:"path,omitempty"` - -Queries []QueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` + Queries []QueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` } - type RequestURIParameters struct { + // +kubebuilder:validation:Optional + Path []PathParameters `json:"path,omitempty" tf:"path,omitempty"` -// +kubebuilder:validation:Optional -Path []PathParameters `json:"path,omitempty" tf:"path,omitempty"` - -// +kubebuilder:validation:Optional -Queries []QueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // +kubebuilder:validation:Optional + Queries []QueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type RequestURIPathInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RequestURIPathObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` - -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string 
`json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RequestURIPathParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RequestURIQueriesInitParameters struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Value []RequestURIQueriesValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + Value []RequestURIQueriesValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type RequestURIQueriesObservation struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Key *string `json:"key,omitempty" tf:"key,omitempty"` - -Value []RequestURIQueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"` + Value []RequestURIQueriesValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type RequestURIQueriesParameters struct { + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` -// +kubebuilder:validation:Optional -Key *string `json:"key" tf:"key,omitempty"` - -// +kubebuilder:validation:Optional -Value []RequestURIQueriesValueParameters `json:"value" tf:"value,omitempty"` + // +kubebuilder:validation:Optional + Value []RequestURIQueriesValueParameters `json:"value" tf:"value,omitempty"` } - type RequestURIQueriesValueInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` - -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PrefixMatch *string 
`json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RequestURIQueriesValueObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` - -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RequestURIQueriesValueParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` - -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type RuleConditionInitParameters struct { + // Action to perform if this rule matched. Possible values: ALLOW or DENY. + Action *string `json:"action,omitempty" tf:"action,omitempty"` -// Action to perform if this rule matched. Possible values: ALLOW or DENY. -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // The condition for matching the rule. 
You can find all possibilities of condition in gRPC specs. + Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` } - type RuleConditionObservation struct { + // Action to perform if this rule matched. Possible values: ALLOW or DENY. + Action *string `json:"action,omitempty" tf:"action,omitempty"` -// Action to perform if this rule matched. Possible values: ALLOW or DENY. -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` } - type RuleConditionParameters struct { + // Action to perform if this rule matched. Possible values: ALLOW or DENY. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` -// Action to perform if this rule matched. Possible values: ALLOW or DENY. -// +kubebuilder:validation:Optional -Action *string `json:"action,omitempty" tf:"action,omitempty"` - -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -// +kubebuilder:validation:Optional -Condition []ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + // +kubebuilder:validation:Optional + Condition []ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` } - type SecurityProfileInitParameters struct { + // Advanced rate limiter profile ID to use with this security profile. Set empty to use default. + AdvancedRateLimiterProfileID *string `json:"advancedRateLimiterProfileId,omitempty" tf:"advanced_rate_limiter_profile_id,omitempty"` -// Advanced rate limiter profile ID to use with this security profile. Set empty to use default. -AdvancedRateLimiterProfileID *string `json:"advancedRateLimiterProfileId,omitempty" tf:"advanced_rate_limiter_profile_id,omitempty"` - -// Captcha ID to use with this security profile. Set empty to use default. -CaptchaID *string `json:"captchaId,omitempty" tf:"captcha_id,omitempty"` + // Captcha ID to use with this security profile. Set empty to use default. + CaptchaID *string `json:"captchaId,omitempty" tf:"captcha_id,omitempty"` -// ID of the security profile. -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // ID of the security profile. + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// Action to perform if none of rules matched. Possible values: ALLOW or DENY. -DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + // Action to perform if none of rules matched. Possible values: ALLOW or DENY. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` -// Optional description of the security profile. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Optional description of the security profile. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// ID of the folder to create a profile in. If omitted, the provider folder is used. 
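// Editor's illustrative sketch: a complete rule condition that denies requests
// to /admin arriving from outside an internal network. SourceIPInitParameters
// is declared elsewhere in this package; its IPRangesNotMatch field is assumed
// here from the ConditionSourceIP* symmetry above (strPtr as above).
var exampleRuleCondition = RuleConditionInitParameters{
    Action: strPtr("DENY"),
    Condition: []ConditionInitParameters{{
        RequestURI: []RequestURIInitParameters{{Path: []PathInitParameters{{PrefixMatch: strPtr("/admin")}}}},
        SourceIP: []SourceIPInitParameters{{
            IPRangesNotMatch: []SourceIPIPRangesNotMatchInitParameters{{IPRanges: []*string{strPtr("192.168.0.0/16")}}},
        }},
    }},
}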
type SecurityProfileInitParameters struct {
    // Advanced rate limiter profile ID to use with this security profile. Set empty to use default.
    AdvancedRateLimiterProfileID *string `json:"advancedRateLimiterProfileId,omitempty" tf:"advanced_rate_limiter_profile_id,omitempty"`

    // Captcha ID to use with this security profile. Set empty to use default.
    CaptchaID *string `json:"captchaId,omitempty" tf:"captcha_id,omitempty"`

    // ID of the security profile.
    CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"`

    // Action to perform if none of rules matched. Possible values: ALLOW or DENY.
    DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"`

    // Optional description of the security profile.
    Description *string `json:"description,omitempty" tf:"description,omitempty"`

    // ID of the folder to create a profile in. If omitted, the provider folder is used.
    // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
    FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

    // Reference to a Folder in resourcemanager to populate folderId.
    // +kubebuilder:validation:Optional
    FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`

    // Selector for a Folder in resourcemanager to populate folderId.
    // +kubebuilder:validation:Optional
    FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`

    // Labels as key:value pairs. Maximum of 64 per resource.
    // +mapType=granular
    Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

    // Name of the security profile. The name is unique within the folder. 1-50 characters long.
    Name *string `json:"name,omitempty" tf:"name,omitempty"`

    // List of security rules. The structure is documented below.
    SecurityRule []SecurityRuleInitParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"`
}
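// Editor's illustrative sketch: a minimal spec.initProvider payload that allows
// traffic by default and dry-runs one rule. boolPtr is a hypothetical helper
// like strPtr above; SecurityRuleInitParameters fields beyond Description and
// DryRun are omitted here.
func boolPtr(b bool) *bool { return &b }

var exampleProfile = SecurityProfileInitParameters{
    Name:          strPtr("example-profile"),
    DefaultAction: strPtr("ALLOW"),
    SecurityRule: []SecurityRuleInitParameters{{
        Description: strPtr("dry-run: deny /admin from outside the corp network"),
        DryRun:      boolPtr(true),
    }},
}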
-DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + // Action to perform if none of rules matched. Possible values: ALLOW or DENY. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` -// Optional description of the security profile. -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Optional description of the security profile. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// ID of the folder to create a profile in. If omitted, the provider folder is used. -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // ID of the folder to create a profile in. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the security profile. -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // ID of the security profile. + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Labels as key:value pairs. Maximum of 64 per resource. -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Labels as key:value pairs. Maximum of 64 per resource. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Name of the security profile. The name is unique within the folder. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the security profile. The name is unique within the folder. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// List of security rules. The structure is documented below. -SecurityRule []SecurityRuleObservation `json:"securityRule,omitempty" tf:"security_rule,omitempty"` + // List of security rules. The structure is documented below. + SecurityRule []SecurityRuleObservation `json:"securityRule,omitempty" tf:"security_rule,omitempty"` } - type SecurityProfileParameters struct { + // Advanced rate limiter profile ID to use with this security profile. Set empty to use default. + // +kubebuilder:validation:Optional + AdvancedRateLimiterProfileID *string `json:"advancedRateLimiterProfileId,omitempty" tf:"advanced_rate_limiter_profile_id,omitempty"` -// Advanced rate limiter profile ID to use with this security profile. Set empty to use default. -// +kubebuilder:validation:Optional -AdvancedRateLimiterProfileID *string `json:"advancedRateLimiterProfileId,omitempty" tf:"advanced_rate_limiter_profile_id,omitempty"` + // Captcha ID to use with this security profile. Set empty to use default. + // +kubebuilder:validation:Optional + CaptchaID *string `json:"captchaId,omitempty" tf:"captcha_id,omitempty"` -// Captcha ID to use with this security profile. Set empty to use default. -// +kubebuilder:validation:Optional -CaptchaID *string `json:"captchaId,omitempty" tf:"captcha_id,omitempty"` + // ID of the security profile. + // +kubebuilder:validation:Optional + CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` -// ID of the security profile. -// +kubebuilder:validation:Optional -CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"` + // Action to perform if none of rules matched. Possible values: ALLOW or DENY. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` -// Action to perform if none of rules matched. Possible values: ALLOW or DENY. 
-// +kubebuilder:validation:Optional -DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + // Optional description of the security profile. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Optional description of the security profile. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // ID of the folder to create a profile in. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// ID of the folder to create a profile in. If omitted, the provider folder is used. -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Labels as key:value pairs. Maximum of 64 per resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Labels as key:value pairs. Maximum of 64 per resource. -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Name of the security profile. The name is unique within the folder. 1-50 characters long. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the security profile. The name is unique within the folder. 1-50 characters long. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// List of security rules. The structure is documented below. -// +kubebuilder:validation:Optional -SecurityRule []SecurityRuleParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"` + // List of security rules. The structure is documented below. + // +kubebuilder:validation:Optional + SecurityRule []SecurityRuleParameters `json:"securityRule,omitempty" tf:"security_rule,omitempty"` } - type SecurityRuleInitParameters struct { + // Optional description of the rule. 0-512 characters long. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Optional description of the rule. 0-512 characters long. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// This mode allows you to test your security profile or a single rule. -DryRun *bool `json:"dryRun,omitempty" tf:"dry_run,omitempty"` + // This mode allows you to test your security profile or a single rule. 
+ DryRun *bool `json:"dryRun,omitempty" tf:"dry_run,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Determines the priority for checking the incoming traffic. -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Determines the priority for checking the incoming traffic. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// Rule actions, see Rule actions. The structure is documented below. -RuleCondition []RuleConditionInitParameters `json:"ruleCondition,omitempty" tf:"rule_condition,omitempty"` + // Rule actions, see Rule actions. The structure is documented below. + RuleCondition []RuleConditionInitParameters `json:"ruleCondition,omitempty" tf:"rule_condition,omitempty"` -// Smart Protection rule, see Smart Protection rules. The structure is documented below. -SmartProtection []SmartProtectionInitParameters `json:"smartProtection,omitempty" tf:"smart_protection,omitempty"` + // Smart Protection rule, see Smart Protection rules. The structure is documented below. + SmartProtection []SmartProtectionInitParameters `json:"smartProtection,omitempty" tf:"smart_protection,omitempty"` -// Web Application Firewall (WAF) rule, see WAF rules. The structure is documented below. -Waf []WafInitParameters `json:"waf,omitempty" tf:"waf,omitempty"` + // Web Application Firewall (WAF) rule, see WAF rules. The structure is documented below. + Waf []WafInitParameters `json:"waf,omitempty" tf:"waf,omitempty"` } - type SecurityRuleObservation struct { + // Optional description of the rule. 0-512 characters long. + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Optional description of the rule. 0-512 characters long. -Description *string `json:"description,omitempty" tf:"description,omitempty"` - -// This mode allows you to test your security profile or a single rule. -DryRun *bool `json:"dryRun,omitempty" tf:"dry_run,omitempty"` + // This mode allows you to test your security profile or a single rule. + DryRun *bool `json:"dryRun,omitempty" tf:"dry_run,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Determines the priority for checking the incoming traffic. -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Determines the priority for checking the incoming traffic. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// Rule actions, see Rule actions. The structure is documented below. -RuleCondition []RuleConditionObservation `json:"ruleCondition,omitempty" tf:"rule_condition,omitempty"` + // Rule actions, see Rule actions. The structure is documented below. + RuleCondition []RuleConditionObservation `json:"ruleCondition,omitempty" tf:"rule_condition,omitempty"` -// Smart Protection rule, see Smart Protection rules. The structure is documented below. -SmartProtection []SmartProtectionObservation `json:"smartProtection,omitempty" tf:"smart_protection,omitempty"` + // Smart Protection rule, see Smart Protection rules. The structure is documented below. 
+ SmartProtection []SmartProtectionObservation `json:"smartProtection,omitempty" tf:"smart_protection,omitempty"` -// Web Application Firewall (WAF) rule, see WAF rules. The structure is documented below. -Waf []WafObservation `json:"waf,omitempty" tf:"waf,omitempty"` + // Web Application Firewall (WAF) rule, see WAF rules. The structure is documented below. + Waf []WafObservation `json:"waf,omitempty" tf:"waf,omitempty"` } - type SecurityRuleParameters struct { + // Optional description of the rule. 0-512 characters long. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Optional description of the rule. 0-512 characters long. -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // This mode allows you to test your security profile or a single rule. + // +kubebuilder:validation:Optional + DryRun *bool `json:"dryRun,omitempty" tf:"dry_run,omitempty"` -// This mode allows you to test your security profile or a single rule. -// +kubebuilder:validation:Optional -DryRun *bool `json:"dryRun,omitempty" tf:"dry_run,omitempty"` + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Determines the priority for checking the incoming traffic. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` -// Determines the priority for checking the incoming traffic. -// +kubebuilder:validation:Optional -Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + // Rule actions, see Rule actions. The structure is documented below. + // +kubebuilder:validation:Optional + RuleCondition []RuleConditionParameters `json:"ruleCondition,omitempty" tf:"rule_condition,omitempty"` -// Rule actions, see Rule actions. The structure is documented below. -// +kubebuilder:validation:Optional -RuleCondition []RuleConditionParameters `json:"ruleCondition,omitempty" tf:"rule_condition,omitempty"` + // Smart Protection rule, see Smart Protection rules. The structure is documented below. + // +kubebuilder:validation:Optional + SmartProtection []SmartProtectionParameters `json:"smartProtection,omitempty" tf:"smart_protection,omitempty"` -// Smart Protection rule, see Smart Protection rules. The structure is documented below. -// +kubebuilder:validation:Optional -SmartProtection []SmartProtectionParameters `json:"smartProtection,omitempty" tf:"smart_protection,omitempty"` - -// Web Application Firewall (WAF) rule, see WAF rules. The structure is documented below. -// +kubebuilder:validation:Optional -Waf []WafParameters `json:"waf,omitempty" tf:"waf,omitempty"` + // Web Application Firewall (WAF) rule, see WAF rules. The structure is documented below. 
+ // +kubebuilder:validation:Optional + Waf []WafParameters `json:"waf,omitempty" tf:"waf,omitempty"` } - type SmartProtectionConditionInitParameters struct { + Authority []ConditionAuthorityInitParameters `json:"authority,omitempty" tf:"authority,omitempty"` + HTTPMethod []ConditionHTTPMethodInitParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -Authority []ConditionAuthorityInitParameters `json:"authority,omitempty" tf:"authority,omitempty"` - -HTTPMethod []ConditionHTTPMethodInitParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` - -Headers []ConditionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + Headers []ConditionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` -RequestURI []ConditionRequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + RequestURI []ConditionRequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -SourceIP []ConditionSourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + SourceIP []ConditionSourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type SmartProtectionConditionObservation struct { + Authority []ConditionAuthorityObservation `json:"authority,omitempty" tf:"authority,omitempty"` + HTTPMethod []ConditionHTTPMethodObservation `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -Authority []ConditionAuthorityObservation `json:"authority,omitempty" tf:"authority,omitempty"` - -HTTPMethod []ConditionHTTPMethodObservation `json:"httpMethod,omitempty" tf:"http_method,omitempty"` - -Headers []ConditionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + Headers []ConditionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` -RequestURI []ConditionRequestURIObservation `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + RequestURI []ConditionRequestURIObservation `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -SourceIP []ConditionSourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + SourceIP []ConditionSourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type SmartProtectionConditionParameters struct { + // +kubebuilder:validation:Optional + Authority []ConditionAuthorityParameters `json:"authority,omitempty" tf:"authority,omitempty"` -// +kubebuilder:validation:Optional -Authority []ConditionAuthorityParameters `json:"authority,omitempty" tf:"authority,omitempty"` - -// +kubebuilder:validation:Optional -HTTPMethod []ConditionHTTPMethodParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + // +kubebuilder:validation:Optional + HTTPMethod []ConditionHTTPMethodParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -// +kubebuilder:validation:Optional -Headers []ConditionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + // +kubebuilder:validation:Optional + Headers []ConditionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` -// +kubebuilder:validation:Optional -RequestURI []ConditionRequestURIParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + // +kubebuilder:validation:Optional + RequestURI []ConditionRequestURIParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -// +kubebuilder:validation:Optional -SourceIP []ConditionSourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // +kubebuilder:validation:Optional + SourceIP []ConditionSourceIPParameters 
`json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type SmartProtectionInitParameters struct { + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + Condition []SmartProtectionConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -Condition []SmartProtectionConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` - -// Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` } - type SmartProtectionObservation struct { + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + Condition []SmartProtectionConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -Condition []SmartProtectionConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` - -// Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` } - type SmartProtectionParameters struct { + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + // +kubebuilder:validation:Optional + Condition []SmartProtectionConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -// +kubebuilder:validation:Optional -Condition []SmartProtectionConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` - -// Mode of protection. 
Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` } - type SourceIPGeoIPMatchInitParameters struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type SourceIPGeoIPMatchObservation struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type SourceIPGeoIPMatchParameters struct { - -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type SourceIPGeoIPNotMatchInitParameters struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type SourceIPGeoIPNotMatchObservation struct { - - -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type SourceIPGeoIPNotMatchParameters struct { - -// +kubebuilder:validation:Optional -Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + // +kubebuilder:validation:Optional + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` } - type SourceIPIPRangesMatchInitParameters struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type SourceIPIPRangesMatchObservation struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type SourceIPIPRangesMatchParameters struct { - -// +kubebuilder:validation:Optional -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + // +kubebuilder:validation:Optional + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type SourceIPIPRangesNotMatchInitParameters struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type SourceIPIPRangesNotMatchObservation struct { - - -IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type SourceIPIPRangesNotMatchParameters struct { - -// +kubebuilder:validation:Optional -IPRanges []*string `json:"ipRanges,omitempty" 
tf:"ip_ranges,omitempty"` + // +kubebuilder:validation:Optional + IPRanges []*string `json:"ipRanges,omitempty" tf:"ip_ranges,omitempty"` } - type SourceIPInitParameters struct { + GeoIPMatch []GeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + GeoIPNotMatch []GeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -GeoIPMatch []GeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -GeoIPNotMatch []GeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + IPRangesMatch []IPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -IPRangesMatch []IPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` - -IPRangesNotMatch []IPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + IPRangesNotMatch []IPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type SourceIPObservation struct { + GeoIPMatch []GeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + GeoIPNotMatch []GeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -GeoIPMatch []GeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -GeoIPNotMatch []GeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + IPRangesMatch []IPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -IPRangesMatch []IPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` - -IPRangesNotMatch []IPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + IPRangesNotMatch []IPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type SourceIPParameters struct { + // +kubebuilder:validation:Optional + GeoIPMatch []GeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` -// +kubebuilder:validation:Optional -GeoIPMatch []GeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + // +kubebuilder:validation:Optional + GeoIPNotMatch []GeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -// +kubebuilder:validation:Optional -GeoIPNotMatch []GeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + // +kubebuilder:validation:Optional + IPRangesMatch []IPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -// +kubebuilder:validation:Optional -IPRangesMatch []IPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` - -// +kubebuilder:validation:Optional -IPRangesNotMatch []IPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + // +kubebuilder:validation:Optional + IPRangesNotMatch []IPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type ValueInitParameters struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string 
`json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ValueObservation struct { + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` - -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type ValueParameters struct { + // +kubebuilder:validation:Optional + ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` -// +kubebuilder:validation:Optional -ExactMatch *string `json:"exactMatch,omitempty" tf:"exact_match,omitempty"` + // +kubebuilder:validation:Optional + ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` -// +kubebuilder:validation:Optional -ExactNotMatch *string `json:"exactNotMatch,omitempty" tf:"exact_not_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexMatch *string `json:"pireRegexMatch,omitempty" tf:"pire_regex_match,omitempty"` + // +kubebuilder:validation:Optional + PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` -// +kubebuilder:validation:Optional -PireRegexNotMatch *string `json:"pireRegexNotMatch,omitempty" tf:"pire_regex_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` -// +kubebuilder:validation:Optional -PrefixMatch *string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` - -// +kubebuilder:validation:Optional -PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` + // +kubebuilder:validation:Optional + PrefixNotMatch *string `json:"prefixNotMatch,omitempty" tf:"prefix_not_match,omitempty"` } - type WafConditionAuthorityInitParameters struct { - - -Authorities []ConditionAuthorityAuthoritiesInitParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` + Authorities 
[]ConditionAuthorityAuthoritiesInitParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type WafConditionAuthorityObservation struct { - - -Authorities []ConditionAuthorityAuthoritiesObservation `json:"authorities,omitempty" tf:"authorities,omitempty"` + Authorities []ConditionAuthorityAuthoritiesObservation `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type WafConditionAuthorityParameters struct { - -// +kubebuilder:validation:Optional -Authorities []ConditionAuthorityAuthoritiesParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` + // +kubebuilder:validation:Optional + Authorities []ConditionAuthorityAuthoritiesParameters `json:"authorities,omitempty" tf:"authorities,omitempty"` } - type WafConditionHTTPMethodInitParameters struct { - - -HTTPMethods []ConditionHTTPMethodHTTPMethodsInitParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + HTTPMethods []ConditionHTTPMethodHTTPMethodsInitParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type WafConditionHTTPMethodObservation struct { - - -HTTPMethods []ConditionHTTPMethodHTTPMethodsObservation `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + HTTPMethods []ConditionHTTPMethodHTTPMethodsObservation `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type WafConditionHTTPMethodParameters struct { - -// +kubebuilder:validation:Optional -HTTPMethods []ConditionHTTPMethodHTTPMethodsParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` + // +kubebuilder:validation:Optional + HTTPMethods []ConditionHTTPMethodHTTPMethodsParameters `json:"httpMethods,omitempty" tf:"http_methods,omitempty"` } - type WafConditionHeadersInitParameters struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []ConditionHeadersValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` + Value []ConditionHeadersValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` } - type WafConditionHeadersObservation struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -Value []ConditionHeadersValueObservation `json:"value,omitempty" tf:"value,omitempty"` + Value []ConditionHeadersValueObservation `json:"value,omitempty" tf:"value,omitempty"` } - type WafConditionHeadersParameters struct { + // Name of the rule. The name is unique within the security profile. 1-50 characters long. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Name of the rule. The name is unique within the security profile. 1-50 characters long. 
-// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// +kubebuilder:validation:Optional -Value []ConditionHeadersValueParameters `json:"value" tf:"value,omitempty"` + // +kubebuilder:validation:Optional + Value []ConditionHeadersValueParameters `json:"value" tf:"value,omitempty"` } - type WafConditionInitParameters struct { + Authority []WafConditionAuthorityInitParameters `json:"authority,omitempty" tf:"authority,omitempty"` + HTTPMethod []WafConditionHTTPMethodInitParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -Authority []WafConditionAuthorityInitParameters `json:"authority,omitempty" tf:"authority,omitempty"` - -HTTPMethod []WafConditionHTTPMethodInitParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` - -Headers []WafConditionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + Headers []WafConditionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` -RequestURI []WafConditionRequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + RequestURI []WafConditionRequestURIInitParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -SourceIP []WafConditionSourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + SourceIP []WafConditionSourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type WafConditionObservation struct { + Authority []WafConditionAuthorityObservation `json:"authority,omitempty" tf:"authority,omitempty"` + HTTPMethod []WafConditionHTTPMethodObservation `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -Authority []WafConditionAuthorityObservation `json:"authority,omitempty" tf:"authority,omitempty"` - -HTTPMethod []WafConditionHTTPMethodObservation `json:"httpMethod,omitempty" tf:"http_method,omitempty"` - -Headers []WafConditionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + Headers []WafConditionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` -RequestURI []WafConditionRequestURIObservation `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + RequestURI []WafConditionRequestURIObservation `json:"requestUri,omitempty" tf:"request_uri,omitempty"` -SourceIP []WafConditionSourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + SourceIP []WafConditionSourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type WafConditionParameters struct { + // +kubebuilder:validation:Optional + Authority []WafConditionAuthorityParameters `json:"authority,omitempty" tf:"authority,omitempty"` -// +kubebuilder:validation:Optional -Authority []WafConditionAuthorityParameters `json:"authority,omitempty" tf:"authority,omitempty"` - -// +kubebuilder:validation:Optional -HTTPMethod []WafConditionHTTPMethodParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + // +kubebuilder:validation:Optional + HTTPMethod []WafConditionHTTPMethodParameters `json:"httpMethod,omitempty" tf:"http_method,omitempty"` -// +kubebuilder:validation:Optional -Headers []WafConditionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + // +kubebuilder:validation:Optional + Headers []WafConditionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` -// +kubebuilder:validation:Optional -RequestURI []WafConditionRequestURIParameters `json:"requestUri,omitempty" tf:"request_uri,omitempty"` + // +kubebuilder:validation:Optional + RequestURI []WafConditionRequestURIParameters 
`json:"requestUri,omitempty" tf:"request_uri,omitempty"` -// +kubebuilder:validation:Optional -SourceIP []WafConditionSourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` + // +kubebuilder:validation:Optional + SourceIP []WafConditionSourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` } - type WafConditionRequestURIInitParameters struct { + Path []ConditionRequestURIPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -Path []ConditionRequestURIPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` - -Queries []ConditionRequestURIQueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` + Queries []ConditionRequestURIQueriesInitParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type WafConditionRequestURIObservation struct { + Path []ConditionRequestURIPathObservation `json:"path,omitempty" tf:"path,omitempty"` - -Path []ConditionRequestURIPathObservation `json:"path,omitempty" tf:"path,omitempty"` - -Queries []ConditionRequestURIQueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` + Queries []ConditionRequestURIQueriesObservation `json:"queries,omitempty" tf:"queries,omitempty"` } - type WafConditionRequestURIParameters struct { + // +kubebuilder:validation:Optional + Path []ConditionRequestURIPathParameters `json:"path,omitempty" tf:"path,omitempty"` -// +kubebuilder:validation:Optional -Path []ConditionRequestURIPathParameters `json:"path,omitempty" tf:"path,omitempty"` - -// +kubebuilder:validation:Optional -Queries []ConditionRequestURIQueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` + // +kubebuilder:validation:Optional + Queries []ConditionRequestURIQueriesParameters `json:"queries,omitempty" tf:"queries,omitempty"` } - type WafConditionSourceIPInitParameters struct { + GeoIPMatch []ConditionSourceIPGeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + GeoIPNotMatch []ConditionSourceIPGeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -GeoIPMatch []ConditionSourceIPGeoIPMatchInitParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -GeoIPNotMatch []ConditionSourceIPGeoIPNotMatchInitParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` - -IPRangesMatch []ConditionSourceIPIPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + IPRangesMatch []ConditionSourceIPIPRangesMatchInitParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -IPRangesNotMatch []ConditionSourceIPIPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + IPRangesNotMatch []ConditionSourceIPIPRangesNotMatchInitParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type WafConditionSourceIPObservation struct { + GeoIPMatch []ConditionSourceIPGeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` + GeoIPNotMatch []ConditionSourceIPGeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -GeoIPMatch []ConditionSourceIPGeoIPMatchObservation `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -GeoIPNotMatch []ConditionSourceIPGeoIPNotMatchObservation `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` - -IPRangesMatch []ConditionSourceIPIPRangesMatchObservation `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + IPRangesMatch []ConditionSourceIPIPRangesMatchObservation 
`json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -IPRangesNotMatch []ConditionSourceIPIPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + IPRangesNotMatch []ConditionSourceIPIPRangesNotMatchObservation `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type WafConditionSourceIPParameters struct { + // +kubebuilder:validation:Optional + GeoIPMatch []ConditionSourceIPGeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` -// +kubebuilder:validation:Optional -GeoIPMatch []ConditionSourceIPGeoIPMatchParameters `json:"geoIpMatch,omitempty" tf:"geo_ip_match,omitempty"` - -// +kubebuilder:validation:Optional -GeoIPNotMatch []ConditionSourceIPGeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` + // +kubebuilder:validation:Optional + GeoIPNotMatch []ConditionSourceIPGeoIPNotMatchParameters `json:"geoIpNotMatch,omitempty" tf:"geo_ip_not_match,omitempty"` -// +kubebuilder:validation:Optional -IPRangesMatch []ConditionSourceIPIPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` + // +kubebuilder:validation:Optional + IPRangesMatch []ConditionSourceIPIPRangesMatchParameters `json:"ipRangesMatch,omitempty" tf:"ip_ranges_match,omitempty"` -// +kubebuilder:validation:Optional -IPRangesNotMatch []ConditionSourceIPIPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` + // +kubebuilder:validation:Optional + IPRangesNotMatch []ConditionSourceIPIPRangesNotMatchParameters `json:"ipRangesNotMatch,omitempty" tf:"ip_ranges_not_match,omitempty"` } - type WafInitParameters struct { + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + Condition []WafConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -Condition []WafConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + // Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` - -// ID of WAF profile to use in this rule. -WafProfileID *string `json:"wafProfileId,omitempty" tf:"waf_profile_id,omitempty"` + // ID of WAF profile to use in this rule. + WafProfileID *string `json:"wafProfileId,omitempty" tf:"waf_profile_id,omitempty"` } - type WafObservation struct { + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + Condition []WafConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` -// The condition for matching the rule. 
You can find all possibilities of condition in gRPC specs. -Condition []WafConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` - -// Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// ID of WAF profile to use in this rule. -WafProfileID *string `json:"wafProfileId,omitempty" tf:"waf_profile_id,omitempty"` + // ID of WAF profile to use in this rule. + WafProfileID *string `json:"wafProfileId,omitempty" tf:"waf_profile_id,omitempty"` } - type WafParameters struct { + // The condition for matching the rule. You can find all possibilities of condition in gRPC specs. + // +kubebuilder:validation:Optional + Condition []WafConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` -// The condition for matching the rule. You can find all possibilities of condition in gRPC specs. -// +kubebuilder:validation:Optional -Condition []WafConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` - -// Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mode of protection. Possible values: FULL (full protection means that the traffic will be checked based on ML models and behavioral analysis, with suspicious requests being sent to SmartCaptcha) or API (API protection means checking the traffic based on ML models and behavioral analysis without sending suspicious requests to SmartCaptcha. The suspicious requests will be blocked). + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// ID of WAF profile to use in this rule. -// +kubebuilder:validation:Optional -WafProfileID *string `json:"wafProfileId" tf:"waf_profile_id,omitempty"` + // ID of WAF profile to use in this rule. + // +kubebuilder:validation:Optional + WafProfileID *string `json:"wafProfileId" tf:"waf_profile_id,omitempty"` } // SecurityProfileSpec defines the desired state of SecurityProfile type SecurityProfileSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider SecurityProfileParameters `json:"forProvider"` + ForProvider SecurityProfileParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. 
 	// InitProvider holds the same fields as ForProvider, with the exception
@@ -2160,20 +1747,19 @@ type SecurityProfileSpec struct {
 	// required on creation, but we do not desire to update them after creation,
 	// for example because an external controller is managing them, like an
 	// autoscaler.
-	InitProvider SecurityProfileInitParameters `json:"initProvider,omitempty"`
+	InitProvider SecurityProfileInitParameters `json:"initProvider,omitempty"`
 }
 // SecurityProfileStatus defines the observed state of SecurityProfile.
 type SecurityProfileStatus struct {
 	v1.ResourceStatus `json:",inline"`
-	AtProvider SecurityProfileObservation `json:"atProvider,omitempty"`
+	AtProvider SecurityProfileObservation `json:"atProvider,omitempty"`
 }
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:storageversion
-
 // SecurityProfile is the Schema for the SecurityProfiles API. With security profiles you can protect your infrastructure from DDoS attacks at the application level (L7).
 // +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
 // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..ab98d26
--- /dev/null
+++ b/apis/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,100 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StoreConfig) DeepCopyInto(out *StoreConfig) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfig.
+func (in *StoreConfig) DeepCopy() *StoreConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(StoreConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StoreConfig) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StoreConfigList) DeepCopyInto(out *StoreConfigList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StoreConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigList.
+func (in *StoreConfigList) DeepCopy() *StoreConfigList {
+	if in == nil {
+		return nil
+	}
+	out := new(StoreConfigList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StoreConfigList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StoreConfigSpec) DeepCopyInto(out *StoreConfigSpec) { + *out = *in + in.SecretStoreConfig.DeepCopyInto(&out.SecretStoreConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigSpec. +func (in *StoreConfigSpec) DeepCopy() *StoreConfigSpec { + if in == nil { + return nil + } + out := new(StoreConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigStatus) DeepCopyInto(out *StoreConfigStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigStatus. +func (in *StoreConfigStatus) DeepCopy() *StoreConfigStatus { + if in == nil { + return nil + } + out := new(StoreConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/v1beta1/zz_generated.deepcopy.go b/apis/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..cdfcb36 --- /dev/null +++ b/apis/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,174 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig. +func (in *ProviderConfig) DeepCopy() *ProviderConfig { + if in == nil { + return nil + } + out := new(ProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigList) DeepCopyInto(out *ProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigList. +func (in *ProviderConfigList) DeepCopy() *ProviderConfigList { + if in == nil { + return nil + } + out := new(ProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigSpec) DeepCopyInto(out *ProviderConfigSpec) { + *out = *in + in.Credentials.DeepCopyInto(&out.Credentials) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigSpec. 
+func (in *ProviderConfigSpec) DeepCopy() *ProviderConfigSpec { + if in == nil { + return nil + } + out := new(ProviderConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) { + *out = *in + in.ProviderConfigStatus.DeepCopyInto(&out.ProviderConfigStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus. +func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus { + if in == nil { + return nil + } + out := new(ProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.ProviderConfigUsage.DeepCopyInto(&out.ProviderConfigUsage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage. +func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage { + if in == nil { + return nil + } + out := new(ProviderConfigUsage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigUsage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigUsageList) DeepCopyInto(out *ProviderConfigUsageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProviderConfigUsage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsageList. +func (in *ProviderConfigUsageList) DeepCopy() *ProviderConfigUsageList { + if in == nil { + return nil + } + out := new(ProviderConfigUsageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigUsageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderCredentials) DeepCopyInto(out *ProviderCredentials) { + *out = *in + in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderCredentials. +func (in *ProviderCredentials) DeepCopy() *ProviderCredentials { + if in == nil { + return nil + } + out := new(ProviderCredentials) + in.DeepCopyInto(out) + return out +} diff --git a/apis/v1beta1/zz_generated.pc.go b/apis/v1beta1/zz_generated.pc.go new file mode 100644 index 0000000..f699091 --- /dev/null +++ b/apis/v1beta1/zz_generated.pc.go @@ -0,0 +1,25 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ProviderConfig. +func (p *ProviderConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return p.Status.GetCondition(ct) +} + +// GetUsers of this ProviderConfig. +func (p *ProviderConfig) GetUsers() int64 { + return p.Status.Users +} + +// SetConditions of this ProviderConfig. +func (p *ProviderConfig) SetConditions(c ...xpv1.Condition) { + p.Status.SetConditions(c...) +} + +// SetUsers of this ProviderConfig. +func (p *ProviderConfig) SetUsers(i int64) { + p.Status.Users = i +} diff --git a/apis/v1beta1/zz_generated.pcu.go b/apis/v1beta1/zz_generated.pcu.go new file mode 100644 index 0000000..3d6f489 --- /dev/null +++ b/apis/v1beta1/zz_generated.pcu.go @@ -0,0 +1,25 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetProviderConfigReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) GetProviderConfigReference() xpv1.Reference { + return p.ProviderConfigReference +} + +// GetResourceReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) GetResourceReference() xpv1.TypedReference { + return p.ResourceReference +} + +// SetProviderConfigReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) SetProviderConfigReference(r xpv1.Reference) { + p.ProviderConfigReference = r +} + +// SetResourceReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) SetResourceReference(r xpv1.TypedReference) { + p.ResourceReference = r +} diff --git a/apis/v1beta1/zz_generated.pculist.go b/apis/v1beta1/zz_generated.pculist.go new file mode 100644 index 0000000..15ebd1b --- /dev/null +++ b/apis/v1beta1/zz_generated.pculist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ProviderConfigUsageList. +func (p *ProviderConfigUsageList) GetItems() []resource.ProviderConfigUsage { + items := make([]resource.ProviderConfigUsage, len(p.Items)) + for i := range p.Items { + items[i] = &p.Items[i] + } + return items +} diff --git a/apis/vpc/v1alpha1/zz_address_terraformed.go b/apis/vpc/v1alpha1/zz_address_terraformed.go new file mode 100755 index 0000000..d726558 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_address_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
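Every *_terraformed.go file in this change ends with the same GetMergedParameters shape, seen first in zz_address_terraformed.go below: the forProvider parameter map is merged with the initProvider one via mergo, with slices deep-copied and existing forProvider values kept. A standalone sketch of those merge semantics, using illustrative maps:

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// forProvider parameters, as GetParameters would return them.
	params := map[string]any{"name": "from-for-provider"}
	// initProvider parameters, as GetInitParameters would return them.
	initParams := map[string]any{
		"name":   "from-init-provider",
		"labels": map[string]any{"env": "dev"},
	}

	// WithSliceDeepCopy deep-copies slice values but also flips Overwrite
	// to true, so the generated code flips it back with a config func.
	err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(params["name"])   // from-for-provider: existing values win
	fmt.Println(params["labels"]) // map[env:dev]: unset values are filled in
}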
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Address
+func (mg *Address) GetTerraformResourceType() string {
+	return "yandex_vpc_address"
+}
+
+// GetConnectionDetailsMapping for this Address
+func (tr *Address) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Address
+func (tr *Address) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Address
+func (tr *Address) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Address
+func (tr *Address) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Address
+func (tr *Address) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Address
+func (tr *Address) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Address
+func (tr *Address) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Address
+func (tr *Address) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, as we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Address using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *Address) LateInitialize(attrs []byte) (bool, error) { + params := &AddressParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Address) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/vpc/v1alpha1/zz_address_types.go b/apis/vpc/v1alpha1/zz_address_types.go new file mode 100755 index 0000000..22ebc44 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_address_types.go @@ -0,0 +1,273 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AddressInitParameters struct { + + // DNS record specification of address + DNSRecord []DNSRecordInitParameters `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"` + + // Flag that protects the address from accidental deletion. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // spec of IP v4 address + ExternalIPv4Address []ExternalIPv4AddressInitParameters `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to apply to this resource. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the address. Provided by the client when the address is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AddressObservation struct { + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // DNS record specification of address + DNSRecord []DNSRecordObservation `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"` + + // Flag that protects the address from accidental deletion. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // spec of IP v4 address + ExternalIPv4Address []ExternalIPv4AddressObservation `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to apply to this resource. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the address. Provided by the client when the address is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // false means that address is ephemeral. + Reserved *bool `json:"reserved,omitempty" tf:"reserved,omitempty"` + + // true if address is used. + Used *bool `json:"used,omitempty" tf:"used,omitempty"` +} + +type AddressParameters struct { + + // DNS record specification of address + // +kubebuilder:validation:Optional + DNSRecord []DNSRecordParameters `json:"dnsRecord,omitempty" tf:"dns_record,omitempty"` + + // Flag that protects the address from accidental deletion. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // spec of IP v4 address + // +kubebuilder:validation:Optional + ExternalIPv4Address []ExternalIPv4AddressParameters `json:"externalIpv4Address,omitempty" tf:"external_ipv4_address,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to apply to this resource. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the address. Provided by the client when the address is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type DNSRecordInitParameters struct { + + // DNS zone id to create record at. + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // FQDN for record to address + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // If PTR record is needed + Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"` + + // TTL of DNS record + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type DNSRecordObservation struct { + + // DNS zone id to create record at. 
+	DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"`
+
+	// FQDN for record to address
+	Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"`
+
+	// If PTR record is needed
+	Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+	// TTL of DNS record
+	TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"`
+}
+
+type DNSRecordParameters struct {
+
+	// DNS zone id to create record at.
+	// +kubebuilder:validation:Optional
+	DNSZoneID *string `json:"dnsZoneId" tf:"dns_zone_id,omitempty"`
+
+	// FQDN for record to address
+	// +kubebuilder:validation:Optional
+	Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"`
+
+	// If PTR record is needed
+	// +kubebuilder:validation:Optional
+	Ptr *bool `json:"ptr,omitempty" tf:"ptr,omitempty"`
+
+	// TTL of DNS record
+	// +kubebuilder:validation:Optional
+	TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"`
+}
+
+type ExternalIPv4AddressInitParameters struct {
+
+	// Enable DDOS protection. Possible values are: "qrator"
+	DdosProtectionProvider *string `json:"ddosProtectionProvider,omitempty" tf:"ddos_protection_provider,omitempty"`
+
+	// Wanted outgoing smtp capability.
+	OutgoingSMTPCapability *string `json:"outgoingSmtpCapability,omitempty" tf:"outgoing_smtp_capability,omitempty"`
+
+	// Zone for allocating address.
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+}
+
+type ExternalIPv4AddressObservation struct {
+
+	// Allocated IP address.
+	Address *string `json:"address,omitempty" tf:"address,omitempty"`
+
+	// Enable DDOS protection. Possible values are: "qrator"
+	DdosProtectionProvider *string `json:"ddosProtectionProvider,omitempty" tf:"ddos_protection_provider,omitempty"`
+
+	// Wanted outgoing smtp capability.
+	OutgoingSMTPCapability *string `json:"outgoingSmtpCapability,omitempty" tf:"outgoing_smtp_capability,omitempty"`
+
+	// Zone for allocating address.
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+}
+
+type ExternalIPv4AddressParameters struct {
+
+	// Enable DDOS protection. Possible values are: "qrator"
+	// +kubebuilder:validation:Optional
+	DdosProtectionProvider *string `json:"ddosProtectionProvider,omitempty" tf:"ddos_protection_provider,omitempty"`
+
+	// Wanted outgoing smtp capability.
+	// +kubebuilder:validation:Optional
+	OutgoingSMTPCapability *string `json:"outgoingSmtpCapability,omitempty" tf:"outgoing_smtp_capability,omitempty"`
+
+	// Zone for allocating address.
+	// +kubebuilder:validation:Optional
+	ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"`
+}
+
+// AddressSpec defines the desired state of Address
+type AddressSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider AddressParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider AddressInitParameters `json:"initProvider,omitempty"`
+}
+
+// AddressStatus defines the observed state of Address.
+type AddressStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider AddressObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Address is the Schema for the Addresses API. Manages a VPC address within Yandex.Cloud.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type Address struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec AddressSpec `json:"spec"`
+	Status AddressStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AddressList contains a list of Addresses
+type AddressList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []Address `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	Address_Kind = "Address"
+	Address_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Address_Kind}.String()
+	Address_KindAPIVersion = Address_Kind + "." + CRDGroupVersion.String()
+	Address_GroupVersionKind = CRDGroupVersion.WithKind(Address_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Address{}, &AddressList{})
+}
diff --git a/apis/vpc/v1alpha1/zz_defaultsecuritygroup_terraformed.go b/apis/vpc/v1alpha1/zz_defaultsecuritygroup_terraformed.go
new file mode 100755
index 0000000..22ed4fd
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_defaultsecuritygroup_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
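Before the DefaultSecurityGroup files below, note that the Address types just defined are ordinary Go API types, so a managed resource can be constructed directly from Go as well as from YAML. A minimal sketch with illustrative values; ptr.To is the k8s.io/utils helper, and the zone name is an example:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	"github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1"
)

// exampleAddress builds an Address managed resource reserving one external
// IPv4 address in a single zone. All values here are illustrative.
func exampleAddress() *v1alpha1.Address {
	return &v1alpha1.Address{
		ObjectMeta: metav1.ObjectMeta{Name: "example-address"},
		Spec: v1alpha1.AddressSpec{
			ForProvider: v1alpha1.AddressParameters{
				Name: ptr.To("example-address"),
				// One external_ipv4_address block, pinned to a zone.
				ExternalIPv4Address: []v1alpha1.ExternalIPv4AddressParameters{{
					ZoneID: ptr.To("ru-central1-a"),
				}},
			},
		},
	}
}

func main() { _ = exampleAddress() }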
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this DefaultSecurityGroup
+func (mg *DefaultSecurityGroup) GetTerraformResourceType() string {
+	return "yandex_vpc_default_security_group"
+}
+
+// GetConnectionDetailsMapping for this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DefaultSecurityGroup
+func (tr *DefaultSecurityGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, as we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DefaultSecurityGroup using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *DefaultSecurityGroup) LateInitialize(attrs []byte) (bool, error) { + params := &DefaultSecurityGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DefaultSecurityGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/vpc/v1alpha1/zz_defaultsecuritygroup_types.go b/apis/vpc/v1alpha1/zz_defaultsecuritygroup_types.go new file mode 100755 index 0000000..dfd1a99 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_defaultsecuritygroup_types.go @@ -0,0 +1,421 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DefaultSecurityGroupInitParameters struct { + + // Description of the security group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of egress rules. The structure is documented below. + Egress []EgressInitParameters `json:"egress,omitempty" tf:"egress,omitempty"` + + // ID of the folder this security group belongs to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of ingress rules. + Ingress []IngressInitParameters `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // ID of the network this security group belongs to. + // +crossplane:generate:reference:type=Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` +} + +type DefaultSecurityGroupObservation struct { + + // Creation timestamp of this security group. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the security group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of egress rules. The structure is documented below. + Egress []EgressObservation `json:"egress,omitempty" tf:"egress,omitempty"` + + // ID of the folder this security group belongs to. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Id of the security group. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of ingress rules. + Ingress []IngressObservation `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of this security group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this security group belongs to. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Status of this security group. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DefaultSecurityGroupParameters struct { + + // Description of the security group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of egress rules. The structure is documented below. + // +kubebuilder:validation:Optional + Egress []EgressParameters `json:"egress,omitempty" tf:"egress,omitempty"` + + // ID of the folder this security group belongs to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of ingress rules. + // +kubebuilder:validation:Optional + Ingress []IngressParameters `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // Labels to assign to this security group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // ID of the network this security group belongs to. + // +crossplane:generate:reference:type=Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` +} + +type EgressInitParameters struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. 
+ SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type EgressObservation struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Id of the security group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type EgressParameters struct { + + // Description of the rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this security group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + // +kubebuilder:validation:Optional + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + // +kubebuilder:validation:Optional + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. 
+ // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + // +kubebuilder:validation:Optional + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + // +kubebuilder:validation:Optional + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type IngressInitParameters struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this rule. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type IngressObservation struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Id of the security group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this rule. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. 
+	V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"`
+}
+
+type IngressParameters struct {
+
+	// Description of the rule.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// Minimum port number.
+	// +kubebuilder:validation:Optional
+	FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"`
+
+	// Labels to assign to this rule.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Port number (if applied to a single port).
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+
+	// Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes.
+	// +kubebuilder:validation:Optional
+	PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"`
+
+	// One of ANY, TCP, UDP, ICMP, IPV6_ICMP.
+	// +kubebuilder:validation:Optional
+	Protocol *string `json:"protocol" tf:"protocol,omitempty"`
+
+	// Target security group ID for this rule.
+	// +kubebuilder:validation:Optional
+	SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"`
+
+	// Maximum port number.
+	// +kubebuilder:validation:Optional
+	ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"`
+
+	// The blocks of IPv4 addresses for this rule.
+	// +kubebuilder:validation:Optional
+	V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"`
+
+	// The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future.
+	// +kubebuilder:validation:Optional
+	V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"`
+}
+
+// DefaultSecurityGroupSpec defines the desired state of DefaultSecurityGroup
+type DefaultSecurityGroupSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider DefaultSecurityGroupParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider DefaultSecurityGroupInitParameters `json:"initProvider,omitempty"`
+}
+
+// DefaultSecurityGroupStatus defines the observed state of DefaultSecurityGroup.
+type DefaultSecurityGroupStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider DefaultSecurityGroupObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// DefaultSecurityGroup is the Schema for the DefaultSecurityGroups API. Yandex VPC Default Security Group.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DefaultSecurityGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DefaultSecurityGroupSpec `json:"spec"` + Status DefaultSecurityGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DefaultSecurityGroupList contains a list of DefaultSecurityGroups +type DefaultSecurityGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DefaultSecurityGroup `json:"items"` +} + +// Repository type metadata. +var ( + DefaultSecurityGroup_Kind = "DefaultSecurityGroup" + DefaultSecurityGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DefaultSecurityGroup_Kind}.String() + DefaultSecurityGroup_KindAPIVersion = DefaultSecurityGroup_Kind + "." + CRDGroupVersion.String() + DefaultSecurityGroup_GroupVersionKind = CRDGroupVersion.WithKind(DefaultSecurityGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&DefaultSecurityGroup{}, &DefaultSecurityGroupList{}) +} diff --git a/apis/vpc/v1alpha1/zz_gateway_terraformed.go b/apis/vpc/v1alpha1/zz_gateway_terraformed.go new file mode 100755 index 0000000..cd13ec4 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_gateway_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
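The zz_gateway_terraformed.go file that follows repeats the LateInitialize implementation already seen for Address and DefaultSecurityGroup: observed Terraform state is decoded into a fresh parameters struct, and the generic late-initializer copies values only into spec fields the user left unset. A hand-rolled, single-field sketch of that rule; the generated code applies it reflectively across the whole struct:

package main

import "fmt"

// lateInitString applies the late-initialization rule to a single *string
// field: an observed value fills the spec field only when it is still nil.
// The bool mirrors LateInitialize's "spec changed" result.
func lateInitString(spec **string, observed *string) bool {
	if *spec != nil || observed == nil {
		return false // user set it, or nothing observed: leave as-is
	}
	v := *observed
	*spec = &v
	return true
}

func main() {
	observed := "folder-from-tfstate"
	var folderID *string // unset in spec.forProvider
	fmt.Println(lateInitString(&folderID, &observed), *folderID) // true folder-from-tfstate
	fmt.Println(lateInitString(&folderID, &observed))            // false: already set
}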
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Gateway
+func (mg *Gateway) GetTerraformResourceType() string {
+	return "yandex_vpc_gateway"
+}
+
+// GetConnectionDetailsMapping for this Gateway
+func (tr *Gateway) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Gateway
+func (tr *Gateway) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Gateway
+func (tr *Gateway) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Gateway
+func (tr *Gateway) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Gateway
+func (tr *Gateway) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Gateway
+func (tr *Gateway) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Gateway
+func (tr *Gateway) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Gateway
+func (tr *Gateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, as we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Gateway using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *Gateway) LateInitialize(attrs []byte) (bool, error) { + params := &GatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Gateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/vpc/v1alpha1/zz_gateway_types.go b/apis/vpc/v1alpha1/zz_gateway_types.go new file mode 100755 index 0000000..6e4835b --- /dev/null +++ b/apis/vpc/v1alpha1/zz_gateway_types.go @@ -0,0 +1,165 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GatewayInitParameters struct { + + // An optional description of this resource. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to apply to this VPC Gateway. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the VPC Gateway. Provided by the client when the VPC Gateway is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Shared egress gateway configuration. Currently empty. + SharedEgressGateway []SharedEgressGatewayInitParameters `json:"sharedEgressGateway,omitempty" tf:"shared_egress_gateway,omitempty"` +} + +type GatewayObservation struct { + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to apply to this VPC Gateway. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the VPC Gateway. Provided by the client when the VPC Gateway is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Shared egress gateway configuration. Currently empty. 
+	SharedEgressGateway []SharedEgressGatewayParameters `json:"sharedEgressGateway,omitempty" tf:"shared_egress_gateway,omitempty"`
+}
+
+type GatewayParameters struct {
+
+	// An optional description of this resource. Provide this property when you create the resource.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels to apply to this VPC Gateway. A list of key/value pairs.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the VPC Gateway. Provided by the client when the VPC Gateway is created.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Shared egress gateway configuration. Currently empty.
+	// +kubebuilder:validation:Optional
+	SharedEgressGateway []SharedEgressGatewayParameters `json:"sharedEgressGateway,omitempty" tf:"shared_egress_gateway,omitempty"`
+}
+
+type SharedEgressGatewayInitParameters struct {
+}
+
+type SharedEgressGatewayObservation struct {
+}
+
+type SharedEgressGatewayParameters struct {
+}
+
+// GatewaySpec defines the desired state of Gateway
+type GatewaySpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider GatewayParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider GatewayInitParameters `json:"initProvider,omitempty"`
+}
+
+// GatewayStatus defines the observed state of Gateway.
+type GatewayStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider GatewayObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Gateway is the Schema for the Gateways API. Manages a gateway within Yandex.Cloud.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Gateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec GatewaySpec `json:"spec"` + Status GatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayList contains a list of Gateways +type GatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Gateway `json:"items"` +} + +// Repository type metadata. +var ( + Gateway_Kind = "Gateway" + Gateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Gateway_Kind}.String() + Gateway_KindAPIVersion = Gateway_Kind + "." + CRDGroupVersion.String() + Gateway_GroupVersionKind = CRDGroupVersion.WithKind(Gateway_Kind) +) + +func init() { + SchemeBuilder.Register(&Gateway{}, &GatewayList{}) +} diff --git a/apis/vpc/v1alpha1/zz_generated.conversion_hubs.go b/apis/vpc/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..b9d65b2 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,30 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Address) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DefaultSecurityGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Gateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Network) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PrivateEndpoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RouteTable) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SecurityGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SecurityGroupRule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Subnet) Hub() {} diff --git a/apis/vpc/v1alpha1/zz_generated.deepcopy.go b/apis/vpc/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..3298982 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,4826 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Address) DeepCopyInto(out *Address) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Address. +func (in *Address) DeepCopy() *Address { + if in == nil { + return nil + } + out := new(Address) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Address) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressInitParameters) DeepCopyInto(out *AddressInitParameters) { + *out = *in + if in.DNSRecord != nil { + in, out := &in.DNSRecord, &out.DNSRecord + *out = make([]DNSRecordInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalIPv4Address != nil { + in, out := &in.ExternalIPv4Address, &out.ExternalIPv4Address + *out = make([]ExternalIPv4AddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressInitParameters. +func (in *AddressInitParameters) DeepCopy() *AddressInitParameters { + if in == nil { + return nil + } + out := new(AddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressList) DeepCopyInto(out *AddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Address, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressList. +func (in *AddressList) DeepCopy() *AddressList { + if in == nil { + return nil + } + out := new(AddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddressObservation) DeepCopyInto(out *AddressObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DNSRecord != nil { + in, out := &in.DNSRecord, &out.DNSRecord + *out = make([]DNSRecordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalIPv4Address != nil { + in, out := &in.ExternalIPv4Address, &out.ExternalIPv4Address + *out = make([]ExternalIPv4AddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Reserved != nil { + in, out := &in.Reserved, &out.Reserved + *out = new(bool) + **out = **in + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressObservation. +func (in *AddressObservation) DeepCopy() *AddressObservation { + if in == nil { + return nil + } + out := new(AddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddressParameters) DeepCopyInto(out *AddressParameters) { + *out = *in + if in.DNSRecord != nil { + in, out := &in.DNSRecord, &out.DNSRecord + *out = make([]DNSRecordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalIPv4Address != nil { + in, out := &in.ExternalIPv4Address, &out.ExternalIPv4Address + *out = make([]ExternalIPv4AddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressParameters. +func (in *AddressParameters) DeepCopy() *AddressParameters { + if in == nil { + return nil + } + out := new(AddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressSpec) DeepCopyInto(out *AddressSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressSpec. +func (in *AddressSpec) DeepCopy() *AddressSpec { + if in == nil { + return nil + } + out := new(AddressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressStatus. +func (in *AddressStatus) DeepCopy() *AddressStatus { + if in == nil { + return nil + } + out := new(AddressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DHCPOptionsInitParameters) DeepCopyInto(out *DHCPOptionsInitParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNameServers != nil { + in, out := &in.DomainNameServers, &out.DomainNameServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NtpServers != nil { + in, out := &in.NtpServers, &out.NtpServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPOptionsInitParameters. +func (in *DHCPOptionsInitParameters) DeepCopy() *DHCPOptionsInitParameters { + if in == nil { + return nil + } + out := new(DHCPOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DHCPOptionsObservation) DeepCopyInto(out *DHCPOptionsObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNameServers != nil { + in, out := &in.DomainNameServers, &out.DomainNameServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NtpServers != nil { + in, out := &in.NtpServers, &out.NtpServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPOptionsObservation. +func (in *DHCPOptionsObservation) DeepCopy() *DHCPOptionsObservation { + if in == nil { + return nil + } + out := new(DHCPOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DHCPOptionsParameters) DeepCopyInto(out *DHCPOptionsParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNameServers != nil { + in, out := &in.DomainNameServers, &out.DomainNameServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NtpServers != nil { + in, out := &in.NtpServers, &out.NtpServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPOptionsParameters. +func (in *DHCPOptionsParameters) DeepCopy() *DHCPOptionsParameters { + if in == nil { + return nil + } + out := new(DHCPOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
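// Illustrative sketch (editorial note, not part of the generated diff): for
// slices of string pointers such as DomainNameServers and NtpServers above,
// copy(dst, src) would duplicate only the pointers and both slices would
// share backing strings, so the generated code allocates a new *string per
// non-nil element instead.
func copyStringPtrSlice(in []*string) []*string {
	if in == nil {
		return nil
	}
	out := make([]*string, len(in))
	for i := range in {
		if in[i] != nil {
			out[i] = new(string)
			*out[i] = *in[i]
		}
	}
	return out
}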
+func (in *DNSOptionsInitParameters) DeepCopyInto(out *DNSOptionsInitParameters) { + *out = *in + if in.PrivateDNSRecordsEnabled != nil { + in, out := &in.PrivateDNSRecordsEnabled, &out.PrivateDNSRecordsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsInitParameters. +func (in *DNSOptionsInitParameters) DeepCopy() *DNSOptionsInitParameters { + if in == nil { + return nil + } + out := new(DNSOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOptionsObservation) DeepCopyInto(out *DNSOptionsObservation) { + *out = *in + if in.PrivateDNSRecordsEnabled != nil { + in, out := &in.PrivateDNSRecordsEnabled, &out.PrivateDNSRecordsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsObservation. +func (in *DNSOptionsObservation) DeepCopy() *DNSOptionsObservation { + if in == nil { + return nil + } + out := new(DNSOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOptionsParameters) DeepCopyInto(out *DNSOptionsParameters) { + *out = *in + if in.PrivateDNSRecordsEnabled != nil { + in, out := &in.PrivateDNSRecordsEnabled, &out.PrivateDNSRecordsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsParameters. +func (in *DNSOptionsParameters) DeepCopy() *DNSOptionsParameters { + if in == nil { + return nil + } + out := new(DNSOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSRecordInitParameters) DeepCopyInto(out *DNSRecordInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordInitParameters. +func (in *DNSRecordInitParameters) DeepCopy() *DNSRecordInitParameters { + if in == nil { + return nil + } + out := new(DNSRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSRecordObservation) DeepCopyInto(out *DNSRecordObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordObservation. 
+func (in *DNSRecordObservation) DeepCopy() *DNSRecordObservation { + if in == nil { + return nil + } + out := new(DNSRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSRecordParameters) DeepCopyInto(out *DNSRecordParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordParameters. +func (in *DNSRecordParameters) DeepCopy() *DNSRecordParameters { + if in == nil { + return nil + } + out := new(DNSRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultSecurityGroup) DeepCopyInto(out *DefaultSecurityGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroup. +func (in *DefaultSecurityGroup) DeepCopy() *DefaultSecurityGroup { + if in == nil { + return nil + } + out := new(DefaultSecurityGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DefaultSecurityGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
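// Illustrative sketch (editorial note, not part of the generated diff):
// DeepCopyObject, as generated for DefaultSecurityGroup above, is what makes
// the type satisfy k8s.io/apimachinery/pkg/runtime.Object, so generic
// Kubernetes machinery (informer caches, clients, controller-runtime) can
// clone objects without knowing their concrete type.
func cloneViaInterface(obj runtime.Object) runtime.Object {
	// The dynamic type is preserved: given a *DefaultSecurityGroup, the
	// result is a *DefaultSecurityGroup.
	return obj.DeepCopyObject()
}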
+func (in *DefaultSecurityGroupInitParameters) DeepCopyInto(out *DefaultSecurityGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroupInitParameters. +func (in *DefaultSecurityGroupInitParameters) DeepCopy() *DefaultSecurityGroupInitParameters { + if in == nil { + return nil + } + out := new(DefaultSecurityGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultSecurityGroupList) DeepCopyInto(out *DefaultSecurityGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DefaultSecurityGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroupList. +func (in *DefaultSecurityGroupList) DeepCopy() *DefaultSecurityGroupList { + if in == nil { + return nil + } + out := new(DefaultSecurityGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DefaultSecurityGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultSecurityGroupObservation) DeepCopyInto(out *DefaultSecurityGroupObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroupObservation. +func (in *DefaultSecurityGroupObservation) DeepCopy() *DefaultSecurityGroupObservation { + if in == nil { + return nil + } + out := new(DefaultSecurityGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultSecurityGroupParameters) DeepCopyInto(out *DefaultSecurityGroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroupParameters. +func (in *DefaultSecurityGroupParameters) DeepCopy() *DefaultSecurityGroupParameters { + if in == nil { + return nil + } + out := new(DefaultSecurityGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultSecurityGroupSpec) DeepCopyInto(out *DefaultSecurityGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroupSpec. +func (in *DefaultSecurityGroupSpec) DeepCopy() *DefaultSecurityGroupSpec { + if in == nil { + return nil + } + out := new(DefaultSecurityGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultSecurityGroupStatus) DeepCopyInto(out *DefaultSecurityGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSecurityGroupStatus. +func (in *DefaultSecurityGroupStatus) DeepCopy() *DefaultSecurityGroupStatus { + if in == nil { + return nil + } + out := new(DefaultSecurityGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
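// Illustrative sketch (editorial note, not part of the generated diff): the
// `if in == nil { return nil }` guard at the top of every DeepCopy makes the
// method safe to call through a nil pointer, so optional sub-structs can be
// copied without a caller-side check.
func copyOptionalStatus(s *DefaultSecurityGroupStatus) *DefaultSecurityGroupStatus {
	return s.DeepCopy() // returns nil when s is nil; no panic
}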
+func (in *EgressInitParameters) DeepCopyInto(out *EgressInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PredefinedTarget != nil { + in, out := &in.PredefinedTarget, &out.PredefinedTarget + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.V6CidrBlocks != nil { + in, out := &in.V6CidrBlocks, &out.V6CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressInitParameters. +func (in *EgressInitParameters) DeepCopy() *EgressInitParameters { + if in == nil { + return nil + } + out := new(EgressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EgressObservation) DeepCopyInto(out *EgressObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PredefinedTarget != nil { + in, out := &in.PredefinedTarget, &out.PredefinedTarget + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.V6CidrBlocks != nil { + in, out := &in.V6CidrBlocks, &out.V6CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressObservation. +func (in *EgressObservation) DeepCopy() *EgressObservation { + if in == nil { + return nil + } + out := new(EgressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EgressParameters) DeepCopyInto(out *EgressParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PredefinedTarget != nil { + in, out := &in.PredefinedTarget, &out.PredefinedTarget + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.V6CidrBlocks != nil { + in, out := &in.V6CidrBlocks, &out.V6CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressParameters. +func (in *EgressParameters) DeepCopy() *EgressParameters { + if in == nil { + return nil + } + out := new(EgressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAddressInitParameters) DeepCopyInto(out *EndpointAddressInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.AddressID != nil { + in, out := &in.AddressID, &out.AddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddressInitParameters. +func (in *EndpointAddressInitParameters) DeepCopy() *EndpointAddressInitParameters { + if in == nil { + return nil + } + out := new(EndpointAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointAddressObservation) DeepCopyInto(out *EndpointAddressObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.AddressID != nil { + in, out := &in.AddressID, &out.AddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddressObservation. +func (in *EndpointAddressObservation) DeepCopy() *EndpointAddressObservation { + if in == nil { + return nil + } + out := new(EndpointAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAddressParameters) DeepCopyInto(out *EndpointAddressParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.AddressID != nil { + in, out := &in.AddressID, &out.AddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddressParameters. +func (in *EndpointAddressParameters) DeepCopy() *EndpointAddressParameters { + if in == nil { + return nil + } + out := new(EndpointAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv4AddressInitParameters) DeepCopyInto(out *ExternalIPv4AddressInitParameters) { + *out = *in + if in.DdosProtectionProvider != nil { + in, out := &in.DdosProtectionProvider, &out.DdosProtectionProvider + *out = new(string) + **out = **in + } + if in.OutgoingSMTPCapability != nil { + in, out := &in.OutgoingSMTPCapability, &out.OutgoingSMTPCapability + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv4AddressInitParameters. +func (in *ExternalIPv4AddressInitParameters) DeepCopy() *ExternalIPv4AddressInitParameters { + if in == nil { + return nil + } + out := new(ExternalIPv4AddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalIPv4AddressObservation) DeepCopyInto(out *ExternalIPv4AddressObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.DdosProtectionProvider != nil { + in, out := &in.DdosProtectionProvider, &out.DdosProtectionProvider + *out = new(string) + **out = **in + } + if in.OutgoingSMTPCapability != nil { + in, out := &in.OutgoingSMTPCapability, &out.OutgoingSMTPCapability + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv4AddressObservation. +func (in *ExternalIPv4AddressObservation) DeepCopy() *ExternalIPv4AddressObservation { + if in == nil { + return nil + } + out := new(ExternalIPv4AddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPv4AddressParameters) DeepCopyInto(out *ExternalIPv4AddressParameters) { + *out = *in + if in.DdosProtectionProvider != nil { + in, out := &in.DdosProtectionProvider, &out.DdosProtectionProvider + *out = new(string) + **out = **in + } + if in.OutgoingSMTPCapability != nil { + in, out := &in.OutgoingSMTPCapability, &out.OutgoingSMTPCapability + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPv4AddressParameters. +func (in *ExternalIPv4AddressParameters) DeepCopy() *ExternalIPv4AddressParameters { + if in == nil { + return nil + } + out := new(ExternalIPv4AddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayInitParameters) DeepCopyInto(out *GatewayInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SharedEgressGateway != nil { + in, out := &in.SharedEgressGateway, &out.SharedEgressGateway + *out = make([]SharedEgressGatewayInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInitParameters. +func (in *GatewayInitParameters) DeepCopy() *GatewayInitParameters { + if in == nil { + return nil + } + out := new(GatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayObservation) DeepCopyInto(out *GatewayObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SharedEgressGateway != nil { + in, out := &in.SharedEgressGateway, &out.SharedEgressGateway + *out = make([]SharedEgressGatewayParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayObservation. +func (in *GatewayObservation) DeepCopy() *GatewayObservation { + if in == nil { + return nil + } + out := new(GatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayParameters) DeepCopyInto(out *GatewayParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SharedEgressGateway != nil { + in, out := &in.SharedEgressGateway, &out.SharedEgressGateway + *out = make([]SharedEgressGatewayParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayParameters. +func (in *GatewayParameters) DeepCopy() *GatewayParameters { + if in == nil { + return nil + } + out := new(GatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. 
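// Illustrative sketch (editorial note, not part of the generated diff): for
// the SharedEgressGateway slices above the generator emits the builtin copy
// instead of per-element DeepCopyInto calls. It does this when the element
// type contains no pointers, maps or slices, so a plain value copy is
// already deep.
func copyFlatSlice(in []SharedEgressGatewayParameters) []SharedEgressGatewayParameters {
	if in == nil {
		return nil
	}
	out := make([]SharedEgressGatewayParameters, len(in))
	copy(out, in)
	return out
}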
+func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatus. +func (in *GatewayStatus) DeepCopy() *GatewayStatus { + if in == nil { + return nil + } + out := new(GatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressInitParameters) DeepCopyInto(out *IngressInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PredefinedTarget != nil { + in, out := &in.PredefinedTarget, &out.PredefinedTarget + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.V6CidrBlocks != nil { + in, out := &in.V6CidrBlocks, &out.V6CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressInitParameters. +func (in *IngressInitParameters) DeepCopy() *IngressInitParameters { + if in == nil { + return nil + } + out := new(IngressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressObservation) DeepCopyInto(out *IngressObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PredefinedTarget != nil { + in, out := &in.PredefinedTarget, &out.PredefinedTarget + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.V6CidrBlocks != nil { + in, out := &in.V6CidrBlocks, &out.V6CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressObservation. +func (in *IngressObservation) DeepCopy() *IngressObservation { + if in == nil { + return nil + } + out := new(IngressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressParameters) DeepCopyInto(out *IngressParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PredefinedTarget != nil { + in, out := &in.PredefinedTarget, &out.PredefinedTarget + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.V6CidrBlocks != nil { + in, out := &in.V6CidrBlocks, &out.V6CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressParameters. +func (in *IngressParameters) DeepCopy() *IngressParameters { + if in == nil { + return nil + } + out := new(IngressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInitParameters) DeepCopyInto(out *NetworkInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInitParameters. +func (in *NetworkInitParameters) DeepCopy() *NetworkInitParameters { + if in == nil { + return nil + } + out := new(NetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkObservation) DeepCopyInto(out *NetworkObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DefaultSecurityGroupID != nil { + in, out := &in.DefaultSecurityGroupID, &out.DefaultSecurityGroupID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkObservation. +func (in *NetworkObservation) DeepCopy() *NetworkObservation { + if in == nil { + return nil + } + out := new(NetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParameters) DeepCopyInto(out *NetworkParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParameters. +func (in *NetworkParameters) DeepCopy() *NetworkParameters { + if in == nil { + return nil + } + out := new(NetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. 
+func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageInitParameters) DeepCopyInto(out *ObjectStorageInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageInitParameters. +func (in *ObjectStorageInitParameters) DeepCopy() *ObjectStorageInitParameters { + if in == nil { + return nil + } + out := new(ObjectStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageObservation) DeepCopyInto(out *ObjectStorageObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageObservation. +func (in *ObjectStorageObservation) DeepCopy() *ObjectStorageObservation { + if in == nil { + return nil + } + out := new(ObjectStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageParameters) DeepCopyInto(out *ObjectStorageParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageParameters. +func (in *ObjectStorageParameters) DeepCopy() *ObjectStorageParameters { + if in == nil { + return nil + } + out := new(ObjectStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpoint) DeepCopyInto(out *PrivateEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpoint. +func (in *PrivateEndpoint) DeepCopy() *PrivateEndpoint { + if in == nil { + return nil + } + out := new(PrivateEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PrivateEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointInitParameters) DeepCopyInto(out *PrivateEndpointInitParameters) { + *out = *in + if in.DNSOptions != nil { + in, out := &in.DNSOptions, &out.DNSOptions + *out = make([]DNSOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndpointAddress != nil { + in, out := &in.EndpointAddress, &out.EndpointAddress + *out = make([]EndpointAddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageInitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointInitParameters. +func (in *PrivateEndpointInitParameters) DeepCopy() *PrivateEndpointInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointList) DeepCopyInto(out *PrivateEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PrivateEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointList. +func (in *PrivateEndpointList) DeepCopy() *PrivateEndpointList { + if in == nil { + return nil + } + out := new(PrivateEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PrivateEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointObservation) DeepCopyInto(out *PrivateEndpointObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DNSOptions != nil {
+		in, out := &in.DNSOptions, &out.DNSOptions
+		*out = make([]DNSOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.EndpointAddress != nil {
+		in, out := &in.EndpointAddress, &out.EndpointAddress
+		*out = make([]EndpointAddressObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ObjectStorage != nil {
+		in, out := &in.ObjectStorage, &out.ObjectStorage
+		*out = make([]ObjectStorageParameters, len(*in))
+		copy(*out, *in)
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointObservation.
+func (in *PrivateEndpointObservation) DeepCopy() *PrivateEndpointObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateEndpointObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateEndpointParameters) DeepCopyInto(out *PrivateEndpointParameters) {
+	*out = *in
+	if in.DNSOptions != nil {
+		in, out := &in.DNSOptions, &out.DNSOptions
+		*out = make([]DNSOptionsParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.EndpointAddress != nil {
+		in, out := &in.EndpointAddress, &out.EndpointAddress
+		*out = make([]EndpointAddressParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ObjectStorage != nil {
+		in, out := &in.ObjectStorage, &out.ObjectStorage
+		*out = make([]ObjectStorageParameters, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointParameters.
+func (in *PrivateEndpointParameters) DeepCopy() *PrivateEndpointParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateEndpointParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateEndpointSpec) DeepCopyInto(out *PrivateEndpointSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointSpec.
+func (in *PrivateEndpointSpec) DeepCopy() *PrivateEndpointSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateEndpointSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateEndpointStatus) DeepCopyInto(out *PrivateEndpointStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointStatus.
+func (in *PrivateEndpointStatus) DeepCopy() *PrivateEndpointStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PrivateEndpointStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTable) DeepCopyInto(out *RouteTable) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTable.
+func (in *RouteTable) DeepCopy() *RouteTable {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTable)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RouteTable) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTableInitParameters) DeepCopyInto(out *RouteTableInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StaticRoute != nil {
+		in, out := &in.StaticRoute, &out.StaticRoute
+		*out = make([]StaticRouteInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableInitParameters.
+func (in *RouteTableInitParameters) DeepCopy() *RouteTableInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTableInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTableList) DeepCopyInto(out *RouteTableList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RouteTable, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableList.
+func (in *RouteTableList) DeepCopy() *RouteTableList {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTableList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RouteTableList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTableObservation) DeepCopyInto(out *RouteTableObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.StaticRoute != nil {
+		in, out := &in.StaticRoute, &out.StaticRoute
+		*out = make([]StaticRouteObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableObservation.
+func (in *RouteTableObservation) DeepCopy() *RouteTableObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTableObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTableParameters) DeepCopyInto(out *RouteTableParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StaticRoute != nil {
+		in, out := &in.StaticRoute, &out.StaticRoute
+		*out = make([]StaticRouteParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableParameters.
+func (in *RouteTableParameters) DeepCopy() *RouteTableParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTableParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTableSpec) DeepCopyInto(out *RouteTableSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableSpec.
+func (in *RouteTableSpec) DeepCopy() *RouteTableSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTableSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteTableStatus) DeepCopyInto(out *RouteTableStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableStatus.
+func (in *RouteTableStatus) DeepCopy() *RouteTableStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(RouteTableStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
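Editor's note (not part of the generated diff): every *Ref/*Selector pair above is a crossplane-runtime v1.Reference / v1.Selector pointer that is re-allocated and deep-copied, so a clone never aliases the original's resolved reference. A hypothetical sketch of the consequence, assuming the same v1 import alias as this file:

	// Hypothetical: mutating the clone's reference leaves the source intact.
	src := &RouteTableParameters{FolderIDRef: &v1.Reference{Name: "folder-a"}}
	dst := src.DeepCopy()
	dst.FolderIDRef.Name = "folder-b" // src.FolderIDRef.Name is still "folder-a"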
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup.
+func (in *SecurityGroup) DeepCopy() *SecurityGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecurityGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupEgressInitParameters) DeepCopyInto(out *SecurityGroupEgressInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupEgressInitParameters.
+func (in *SecurityGroupEgressInitParameters) DeepCopy() *SecurityGroupEgressInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupEgressInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupEgressObservation) DeepCopyInto(out *SecurityGroupEgressObservation) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupEgressObservation.
+func (in *SecurityGroupEgressObservation) DeepCopy() *SecurityGroupEgressObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupEgressObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupEgressParameters) DeepCopyInto(out *SecurityGroupEgressParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupEgressParameters.
+func (in *SecurityGroupEgressParameters) DeepCopy() *SecurityGroupEgressParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupEgressParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupIngressInitParameters) DeepCopyInto(out *SecurityGroupIngressInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupIngressInitParameters.
+func (in *SecurityGroupIngressInitParameters) DeepCopy() *SecurityGroupIngressInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupIngressInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupIngressObservation) DeepCopyInto(out *SecurityGroupIngressObservation) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupIngressObservation.
+func (in *SecurityGroupIngressObservation) DeepCopy() *SecurityGroupIngressObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupIngressObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupIngressParameters) DeepCopyInto(out *SecurityGroupIngressParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupIngressParameters.
+func (in *SecurityGroupIngressParameters) DeepCopy() *SecurityGroupIngressParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupIngressParameters)
+	in.DeepCopyInto(out)
+	return out
+}
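Editor's note (not part of the generated diff): the recurring Labels block above exists because the field is map[string]*string. make() allocates a fresh map, nil values are preserved, and each non-nil value gets its own newly allocated *string, so no string pointer is ever shared between source and copy. A hypothetical sketch:

	// Hypothetical: the copy owns its own value pointers.
	env := "prod"
	src := &SecurityGroupIngressParameters{Labels: map[string]*string{"env": &env}}
	dst := src.DeepCopy()
	*dst.Labels["env"] = "staging" // *src.Labels["env"] is still "prod"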
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupInitParameters) DeepCopyInto(out *SecurityGroupInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Egress != nil {
+		in, out := &in.Egress, &out.Egress
+		*out = make([]SecurityGroupEgressInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]SecurityGroupIngressInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupInitParameters.
+func (in *SecurityGroupInitParameters) DeepCopy() *SecurityGroupInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupList) DeepCopyInto(out *SecurityGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]SecurityGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupList.
+func (in *SecurityGroupList) DeepCopy() *SecurityGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecurityGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupObservation) DeepCopyInto(out *SecurityGroupObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Egress != nil {
+		in, out := &in.Egress, &out.Egress
+		*out = make([]SecurityGroupEgressObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]SecurityGroupIngressObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupObservation.
+func (in *SecurityGroupObservation) DeepCopy() *SecurityGroupObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupParameters) DeepCopyInto(out *SecurityGroupParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Egress != nil {
+		in, out := &in.Egress, &out.Egress
+		*out = make([]SecurityGroupEgressParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]SecurityGroupIngressParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupParameters.
+func (in *SecurityGroupParameters) DeepCopy() *SecurityGroupParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRule) DeepCopyInto(out *SecurityGroupRule) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRule.
+func (in *SecurityGroupRule) DeepCopy() *SecurityGroupRule {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecurityGroupRule) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRuleInitParameters) DeepCopyInto(out *SecurityGroupRuleInitParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Direction != nil {
+		in, out := &in.Direction, &out.Direction
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupBinding != nil {
+		in, out := &in.SecurityGroupBinding, &out.SecurityGroupBinding
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupBindingRef != nil {
+		in, out := &in.SecurityGroupBindingRef, &out.SecurityGroupBindingRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupBindingSelector != nil {
+		in, out := &in.SecurityGroupBindingSelector, &out.SecurityGroupBindingSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRuleInitParameters.
+func (in *SecurityGroupRuleInitParameters) DeepCopy() *SecurityGroupRuleInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRuleInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRuleList) DeepCopyInto(out *SecurityGroupRuleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]SecurityGroupRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRuleList.
+func (in *SecurityGroupRuleList) DeepCopy() *SecurityGroupRuleList {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRuleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecurityGroupRuleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRuleObservation) DeepCopyInto(out *SecurityGroupRuleObservation) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Direction != nil {
+		in, out := &in.Direction, &out.Direction
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupBinding != nil {
+		in, out := &in.SecurityGroupBinding, &out.SecurityGroupBinding
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRuleObservation.
+func (in *SecurityGroupRuleObservation) DeepCopy() *SecurityGroupRuleObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRuleObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRuleParameters) DeepCopyInto(out *SecurityGroupRuleParameters) {
+	*out = *in
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.Direction != nil {
+		in, out := &in.Direction, &out.Direction
+		*out = new(string)
+		**out = **in
+	}
+	if in.FromPort != nil {
+		in, out := &in.FromPort, &out.FromPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(float64)
+		**out = **in
+	}
+	if in.PredefinedTarget != nil {
+		in, out := &in.PredefinedTarget, &out.PredefinedTarget
+		*out = new(string)
+		**out = **in
+	}
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupBinding != nil {
+		in, out := &in.SecurityGroupBinding, &out.SecurityGroupBinding
+		*out = new(string)
+		**out = **in
+	}
+	if in.SecurityGroupBindingRef != nil {
+		in, out := &in.SecurityGroupBindingRef, &out.SecurityGroupBindingRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupBindingSelector != nil {
+		in, out := &in.SecurityGroupBindingSelector, &out.SecurityGroupBindingSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityGroupID != nil {
+		in, out := &in.SecurityGroupID, &out.SecurityGroupID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ToPort != nil {
+		in, out := &in.ToPort, &out.ToPort
+		*out = new(float64)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRuleParameters.
+func (in *SecurityGroupRuleParameters) DeepCopy() *SecurityGroupRuleParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRuleParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRuleSpec) DeepCopyInto(out *SecurityGroupRuleSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRuleSpec.
+func (in *SecurityGroupRuleSpec) DeepCopy() *SecurityGroupRuleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRuleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupRuleStatus) DeepCopyInto(out *SecurityGroupRuleStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRuleStatus.
+func (in *SecurityGroupRuleStatus) DeepCopy() *SecurityGroupRuleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupRuleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupSpec) DeepCopyInto(out *SecurityGroupSpec) {
+	*out = *in
+	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
+	in.ForProvider.DeepCopyInto(&out.ForProvider)
+	in.InitProvider.DeepCopyInto(&out.InitProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupSpec.
+func (in *SecurityGroupSpec) DeepCopy() *SecurityGroupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupStatus) DeepCopyInto(out *SecurityGroupStatus) {
+	*out = *in
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupStatus.
+func (in *SecurityGroupStatus) DeepCopy() *SecurityGroupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityGroupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedEgressGatewayInitParameters) DeepCopyInto(out *SharedEgressGatewayInitParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedEgressGatewayInitParameters.
+func (in *SharedEgressGatewayInitParameters) DeepCopy() *SharedEgressGatewayInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharedEgressGatewayInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedEgressGatewayObservation) DeepCopyInto(out *SharedEgressGatewayObservation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedEgressGatewayObservation.
+func (in *SharedEgressGatewayObservation) DeepCopy() *SharedEgressGatewayObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SharedEgressGatewayObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedEgressGatewayParameters) DeepCopyInto(out *SharedEgressGatewayParameters) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedEgressGatewayParameters.
+func (in *SharedEgressGatewayParameters) DeepCopy() *SharedEgressGatewayParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SharedEgressGatewayParameters)
+	in.DeepCopyInto(out)
+	return out
+}
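Editor's note (not part of the generated diff): the three SharedEgressGateway types above carry no pointer, slice, or map fields, so their DeepCopyInto bodies reduce to a plain struct assignment; *out = *in already yields a fully independent copy in that case. A hypothetical sketch:

	// Hypothetical: for a value-only struct, assignment and DeepCopy
	// are equivalent.
	a := SharedEgressGatewayParameters{}
	b := a // same effect as a.DeepCopyInto(&b)
	_ = b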
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticRouteInitParameters) DeepCopyInto(out *StaticRouteInitParameters) {
+	*out = *in
+	if in.DestinationPrefix != nil {
+		in, out := &in.DestinationPrefix, &out.DestinationPrefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.GatewayID != nil {
+		in, out := &in.GatewayID, &out.GatewayID
+		*out = new(string)
+		**out = **in
+	}
+	if in.GatewayIDRef != nil {
+		in, out := &in.GatewayIDRef, &out.GatewayIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GatewayIDSelector != nil {
+		in, out := &in.GatewayIDSelector, &out.GatewayIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NextHopAddress != nil {
+		in, out := &in.NextHopAddress, &out.NextHopAddress
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticRouteInitParameters.
+func (in *StaticRouteInitParameters) DeepCopy() *StaticRouteInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StaticRouteInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticRouteObservation) DeepCopyInto(out *StaticRouteObservation) {
+	*out = *in
+	if in.DestinationPrefix != nil {
+		in, out := &in.DestinationPrefix, &out.DestinationPrefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.GatewayID != nil {
+		in, out := &in.GatewayID, &out.GatewayID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NextHopAddress != nil {
+		in, out := &in.NextHopAddress, &out.NextHopAddress
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticRouteObservation.
+func (in *StaticRouteObservation) DeepCopy() *StaticRouteObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(StaticRouteObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticRouteParameters) DeepCopyInto(out *StaticRouteParameters) {
+	*out = *in
+	if in.DestinationPrefix != nil {
+		in, out := &in.DestinationPrefix, &out.DestinationPrefix
+		*out = new(string)
+		**out = **in
+	}
+	if in.GatewayID != nil {
+		in, out := &in.GatewayID, &out.GatewayID
+		*out = new(string)
+		**out = **in
+	}
+	if in.GatewayIDRef != nil {
+		in, out := &in.GatewayIDRef, &out.GatewayIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GatewayIDSelector != nil {
+		in, out := &in.GatewayIDSelector, &out.GatewayIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NextHopAddress != nil {
+		in, out := &in.NextHopAddress, &out.NextHopAddress
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticRouteParameters.
+func (in *StaticRouteParameters) DeepCopy() *StaticRouteParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(StaticRouteParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subnet) DeepCopyInto(out *Subnet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet.
+func (in *Subnet) DeepCopy() *Subnet {
+	if in == nil {
+		return nil
+	}
+	out := new(Subnet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Subnet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubnetInitParameters) DeepCopyInto(out *SubnetInitParameters) {
+	*out = *in
+	if in.DHCPOptions != nil {
+		in, out := &in.DHCPOptions, &out.DHCPOptions
+		*out = make([]DHCPOptionsInitParameters, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderIDRef != nil {
+		in, out := &in.FolderIDRef, &out.FolderIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FolderIDSelector != nil {
+		in, out := &in.FolderIDSelector, &out.FolderIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkIDRef != nil {
+		in, out := &in.NetworkIDRef, &out.NetworkIDRef
+		*out = new(v1.Reference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkIDSelector != nil {
+		in, out := &in.NetworkIDSelector, &out.NetworkIDSelector
+		*out = new(v1.Selector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RouteTableID != nil {
+		in, out := &in.RouteTableID, &out.RouteTableID
+		*out = new(string)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetInitParameters.
+func (in *SubnetInitParameters) DeepCopy() *SubnetInitParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(SubnetInitParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubnetList) DeepCopyInto(out *SubnetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Subnet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetList.
+func (in *SubnetList) DeepCopy() *SubnetList {
+	if in == nil {
+		return nil
+	}
+	out := new(SubnetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubnetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubnetObservation) DeepCopyInto(out *SubnetObservation) {
+	*out = *in
+	if in.CreatedAt != nil {
+		in, out := &in.CreatedAt, &out.CreatedAt
+		*out = new(string)
+		**out = **in
+	}
+	if in.DHCPOptions != nil {
+		in, out := &in.DHCPOptions, &out.DHCPOptions
+		*out = make([]DHCPOptionsObservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Description != nil {
+		in, out := &in.Description, &out.Description
+		*out = new(string)
+		**out = **in
+	}
+	if in.FolderID != nil {
+		in, out := &in.FolderID, &out.FolderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]*string, len(*in))
+		for key, val := range *in {
+			var outVal *string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = new(string)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.NetworkID != nil {
+		in, out := &in.NetworkID, &out.NetworkID
+		*out = new(string)
+		**out = **in
+	}
+	if in.RouteTableID != nil {
+		in, out := &in.RouteTableID, &out.RouteTableID
+		*out = new(string)
+		**out = **in
+	}
+	if in.V4CidrBlocks != nil {
+		in, out := &in.V4CidrBlocks, &out.V4CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.V6CidrBlocks != nil {
+		in, out := &in.V6CidrBlocks, &out.V6CidrBlocks
+		*out = make([]*string, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(string)
+				**out = **in
+			}
+		}
+	}
+	if in.Zone != nil {
+		in, out := &in.Zone, &out.Zone
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetObservation.
+func (in *SubnetObservation) DeepCopy() *SubnetObservation {
+	if in == nil {
+		return nil
+	}
+	out := new(SubnetObservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubnetParameters) DeepCopyInto(out *SubnetParameters) { + *out = *in + if in.DHCPOptions != nil { + in, out := &in.DHCPOptions, &out.DHCPOptions + *out = make([]DHCPOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RouteTableID != nil { + in, out := &in.RouteTableID, &out.RouteTableID + *out = new(string) + **out = **in + } + if in.V4CidrBlocks != nil { + in, out := &in.V4CidrBlocks, &out.V4CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParameters. +func (in *SubnetParameters) DeepCopy() *SubnetParameters { + if in == nil { + return nil + } + out := new(SubnetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec. +func (in *SubnetSpec) DeepCopy() *SubnetSpec { + if in == nil { + return nil + } + out := new(SubnetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetStatus) DeepCopyInto(out *SubnetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetStatus. 
+func (in *SubnetStatus) DeepCopy() *SubnetStatus { + if in == nil { + return nil + } + out := new(SubnetStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/vpc/v1alpha1/zz_generated.resolvers.go b/apis/vpc/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..75ae70c --- /dev/null +++ b/apis/vpc/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,623 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Address. +func (mg *Address) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DefaultSecurityGroup. 
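// A minimal sketch of how these generated resolvers are exercised
// (assuming a client.Reader named kube and a context ctx; in practice the
// crossplane managed reconciler makes this call during each reconcile):
//
//	n := &Network{}
//	n.Spec.ForProvider.FolderIDRef = &v1.Reference{Name: "my-folder"}
//	if err := n.ResolveReferences(ctx, kube); err != nil {
//		return err // e.g. the referenced Folder does not exist yet
//	}
//	// n.Spec.ForProvider.FolderID now holds the Folder's external name.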
+func (mg *DefaultSecurityGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Gateway. 
+func (mg *Gateway) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Network. +func (mg *Network) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this PrivateEndpoint. 
+func (mg *PrivateEndpoint) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.EndpointAddress); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EndpointAddress[i3].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.EndpointAddress[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.EndpointAddress[i3].SubnetIDSelector, + To: reference.To{ + List: &SubnetList{}, + Managed: &Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EndpointAddress[i3].SubnetID") + } + mg.Spec.ForProvider.EndpointAddress[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EndpointAddress[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.EndpointAddress); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EndpointAddress[i3].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.EndpointAddress[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.EndpointAddress[i3].SubnetIDSelector, + To: reference.To{ + List: &SubnetList{}, + Managed: &Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EndpointAddress[i3].SubnetID") + } + mg.Spec.InitProvider.EndpointAddress[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EndpointAddress[i3].SubnetIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: resource.ExtractResourceID(), 
+ Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RouteTable. +func (mg *RouteTable) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.StaticRoute); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StaticRoute[i3].GatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StaticRoute[i3].GatewayIDRef, + Selector: mg.Spec.ForProvider.StaticRoute[i3].GatewayIDSelector, + To: reference.To{ + List: &GatewayList{}, + Managed: &Gateway{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StaticRoute[i3].GatewayID") + } + mg.Spec.ForProvider.StaticRoute[i3].GatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StaticRoute[i3].GatewayIDRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.StaticRoute); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StaticRoute[i3].GatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StaticRoute[i3].GatewayIDRef, + Selector: mg.Spec.InitProvider.StaticRoute[i3].GatewayIDSelector, + To: reference.To{ + List: &GatewayList{}, + Managed: &Gateway{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StaticRoute[i3].GatewayID") + } + mg.Spec.InitProvider.StaticRoute[i3].GatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StaticRoute[i3].GatewayIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this SecurityGroup. +func (mg *SecurityGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SecurityGroupRule. 
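// Two different extractors are in play in this file: reference.ExternalName()
// reads the referenced object's crossplane.io/external-name annotation,
// while resource.ExtractResourceID() (used for the PrivateEndpoint subnet
// and network references above) reads status.atProvider.id. Once the
// dependency is ready, both should yield the same Yandex Cloud ID. Roughly
// what ExternalName() returns (a sketch; the real implementation lives in
// crossplane-runtime, and xpresource is a hypothetical import alias):
//
//	extract := func(mg xpresource.Managed) string {
//		return mg.GetAnnotations()["crossplane.io/external-name"]
//	}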
+func (mg *SecurityGroupRule) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecurityGroupBinding), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SecurityGroupBindingRef, + Selector: mg.Spec.ForProvider.SecurityGroupBindingSelector, + To: reference.To{ + List: &SecurityGroupList{}, + Managed: &SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupBinding") + } + mg.Spec.ForProvider.SecurityGroupBinding = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SecurityGroupBindingRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecurityGroupBinding), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SecurityGroupBindingRef, + Selector: mg.Spec.InitProvider.SecurityGroupBindingSelector, + To: reference.To{ + List: &SecurityGroupList{}, + Managed: &SecurityGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupBinding") + } + mg.Spec.InitProvider.SecurityGroupBinding = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SecurityGroupBindingRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Subnet. +func (mg *Subnet) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &NetworkList{}, + Managed: &Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/vpc/v1alpha1/zz_groupversion_info.go b/apis/vpc/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..5704393 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=vpc.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "vpc.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/vpc/v1alpha1/zz_network_terraformed.go b/apis/vpc/v1alpha1/zz_network_terraformed.go new file mode 100755 index 0000000..20f02b3 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_network_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Network +func (mg *Network) GetTerraformResourceType() string { + return "yandex_vpc_network" +} + +// GetConnectionDetailsMapping for this Network +func (tr *Network) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Network +func (tr *Network) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Network +func (tr *Network) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Network +func (tr *Network) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Network +func (tr *Network) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Network +func (tr *Network) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Network +func (tr *Network) GetInitParameters() 
(map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Network
+func (tr *Network) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Network using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Network) LateInitialize(attrs []byte) (bool, error) {
+	params := &NetworkParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Network) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/vpc/v1alpha1/zz_network_types.go b/apis/vpc/v1alpha1/zz_network_types.go
new file mode 100755
index 0000000..b475e6d
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_network_types.go
@@ -0,0 +1,151 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type NetworkInitParameters struct {
+
+	// An optional description of this resource. Provide this property when you create the resource.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels to apply to this network. A list of key/value pairs.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the network. Provided by the client when the network is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type NetworkObservation struct {
+
+	// Creation timestamp of the key.
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// ID of default Security Group of this network.
+	DefaultSecurityGroupID *string `json:"defaultSecurityGroupId,omitempty" tf:"default_security_group_id,omitempty"`
+
+	// An optional description of this resource. Provide this property when you create the resource.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Labels to apply to this network. A list of key/value pairs.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the network. Provided by the client when the network is created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+}
+
+type NetworkParameters struct {
+
+	// An optional description of this resource. Provide this property when you create the resource.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// Labels to apply to this network. A list of key/value pairs.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Name of the network. Provided by the client when the network is created.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+// NetworkSpec defines the desired state of Network
+type NetworkSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider NetworkParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
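// A short sketch of the intended split (hypothetical values; assumes the
// Management Policies feature is enabled and k8s.io/utils/ptr):
//
//	n := &Network{}
//	n.Spec.InitProvider.Name = ptr.To("initial-name")          // honored at create, then left alone
//	n.Spec.ForProvider.Description = ptr.To("always enforced") // reconciled continuously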
+ InitProvider NetworkInitParameters `json:"initProvider,omitempty"` +} + +// NetworkStatus defines the observed state of Network. +type NetworkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NetworkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Network is the Schema for the Networks API. Manages a network within Yandex.Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec NetworkSpec `json:"spec"` + Status NetworkStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NetworkList contains a list of Networks +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Network `json:"items"` +} + +// Repository type metadata. +var ( + Network_Kind = "Network" + Network_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Network_Kind}.String() + Network_KindAPIVersion = Network_Kind + "." + CRDGroupVersion.String() + Network_GroupVersionKind = CRDGroupVersion.WithKind(Network_Kind) +) + +func init() { + SchemeBuilder.Register(&Network{}, &NetworkList{}) +} diff --git a/apis/vpc/v1alpha1/zz_privateendpoint_terraformed.go b/apis/vpc/v1alpha1/zz_privateendpoint_terraformed.go new file mode 100755 index 0000000..547d541 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_privateendpoint_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this PrivateEndpoint
+func (mg *PrivateEndpoint) GetTerraformResourceType() string {
+	return "yandex_vpc_private_endpoint"
+}
+
+// GetConnectionDetailsMapping for this PrivateEndpoint
+func (tr *PrivateEndpoint) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this PrivateEndpoint
+func (tr *PrivateEndpoint) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this PrivateEndpoint
+func (tr *PrivateEndpoint) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this PrivateEndpoint
+func (tr *PrivateEndpoint) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this PrivateEndpoint
+func (tr *PrivateEndpoint) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this PrivateEndpoint
+func (tr *PrivateEndpoint) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this PrivateEndpoint
+func (tr *PrivateEndpoint) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this PrivateEndpoint
+func (tr *PrivateEndpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this PrivateEndpoint using its observed tfState.
+// returns True if there are any spec changes for the resource.
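// The practical effect of the merge above (Overwrite forced back to false
// plus WithSliceDeepCopy) is "forProvider wins, initProvider fills gaps".
// A small illustration with hypothetical keys:
//
//	params := map[string]any{"name": "from-forProvider"}
//	initParams := map[string]any{"name": "from-initProvider", "labels": map[string]any{"env": "dev"}}
//	_ = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { c.Overwrite = false })
//	// params["name"] is still "from-forProvider"; params gained "labels" from initParams.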
+func (tr *PrivateEndpoint) LateInitialize(attrs []byte) (bool, error) {
+	params := &PrivateEndpointParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *PrivateEndpoint) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/vpc/v1alpha1/zz_privateendpoint_types.go b/apis/vpc/v1alpha1/zz_privateendpoint_types.go
new file mode 100755
index 0000000..5e7ba8b
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_privateendpoint_types.go
@@ -0,0 +1,294 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type DNSOptionsInitParameters struct {
+
+	// If enabled, an additional service DNS record will be created.
+	PrivateDNSRecordsEnabled *bool `json:"privateDnsRecordsEnabled,omitempty" tf:"private_dns_records_enabled,omitempty"`
+}
+
+type DNSOptionsObservation struct {
+
+	// If enabled, an additional service DNS record will be created.
+	PrivateDNSRecordsEnabled *bool `json:"privateDnsRecordsEnabled,omitempty" tf:"private_dns_records_enabled,omitempty"`
+}
+
+type DNSOptionsParameters struct {
+
+	// If enabled, an additional service DNS record will be created.
+	// +kubebuilder:validation:Optional
+	PrivateDNSRecordsEnabled *bool `json:"privateDnsRecordsEnabled,omitempty" tf:"private_dns_records_enabled,omitempty"`
+}
+
+type EndpointAddressInitParameters struct {
+
+	// Specifies IP address within subnet_id.
+	Address *string `json:"address,omitempty" tf:"address,omitempty"`
+
+	// ID of the address.
+	AddressID *string `json:"addressId,omitempty" tf:"address_id,omitempty"`
+
+	// Subnet of the IP address.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+	// Reference to a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"`
+
+	// Selector for a Subnet in vpc to populate subnetId.
+	// +kubebuilder:validation:Optional
+	SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"`
+}
+
+type EndpointAddressObservation struct {
+
+	// Specifies IP address within subnet_id.
+	Address *string `json:"address,omitempty" tf:"address,omitempty"`
+
+	// ID of the address.
+	AddressID *string `json:"addressId,omitempty" tf:"address_id,omitempty"`
+
+	// Subnet of the IP address.
+	SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+}
+
+type EndpointAddressParameters struct {
+
+	// Specifies IP address within subnet_id.
+	// +kubebuilder:validation:Optional
+	Address *string `json:"address,omitempty" tf:"address,omitempty"`
+
+	// ID of the address.
+	// +kubebuilder:validation:Optional
+	AddressID *string `json:"addressId,omitempty" tf:"address_id,omitempty"`
+
+	// Subnet of the IP address.
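// The +crossplane:generate:reference markers repeated on the fields below
// are the input angryjet consumed to emit the ResolveReferences methods in
// zz_generated.resolvers.go above: type= names the referenced kind, and
// extractor= chooses how its ID is read. A short-form sketch of the same
// marker, resolving within the current package (hypothetical field):
//
//	// +crossplane:generate:reference:type=Network
//	NetworkID *string `json:"networkId,omitempty"`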
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ObjectStorageInitParameters struct { +} + +type ObjectStorageObservation struct { +} + +type ObjectStorageParameters struct { +} + +type PrivateEndpointInitParameters struct { + + // Private endpoint DNS options block. + DNSOptions []DNSOptionsInitParameters `json:"dnsOptions,omitempty" tf:"dns_options,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Private endpoint address specification block. + EndpointAddress []EndpointAddressInitParameters `json:"endpointAddress,omitempty" tf:"endpoint_address,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to apply to this resource. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the private endpoint. Provided by the client when the private endpoint is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network which private endpoint belongs to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` +} + +type PrivateEndpointObservation struct { + + // Creation timestamp of the key. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Private endpoint DNS options block. + DNSOptions []DNSOptionsObservation `json:"dnsOptions,omitempty" tf:"dns_options,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. 
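// A note on the empty ObjectStorage* structs above: the object_storage
// block appears to carry no attributes of its own in the Terraform schema
// (the generated structs are empty), so it acts purely as a presence flag,
// and the CEL validation rule on the PrivateEndpoint type below requires
// it. A minimal way to satisfy it, inferred from the generated types:
//
//	pe := &PrivateEndpoint{}
//	pe.Spec.ForProvider.ObjectStorage = []ObjectStorageParameters{{}}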
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Private endpoint address specification block. + EndpointAddress []EndpointAddressObservation `json:"endpointAddress,omitempty" tf:"endpoint_address,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to apply to this resource. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the private endpoint. Provided by the client when the private endpoint is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network which private endpoint belongs to. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + + // Status of the private endpoint. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type PrivateEndpointParameters struct { + + // Private endpoint DNS options block. + // +kubebuilder:validation:Optional + DNSOptions []DNSOptionsParameters `json:"dnsOptions,omitempty" tf:"dns_options,omitempty"` + + // An optional description of this resource. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Private endpoint address specification block. + // +kubebuilder:validation:Optional + EndpointAddress []EndpointAddressParameters `json:"endpointAddress,omitempty" tf:"endpoint_address,omitempty"` + + // ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to apply to this resource. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the private endpoint. Provided by the client when the private endpoint is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network which private endpoint belongs to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. 
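// As a rule of thumb for the paired fields below: a *Ref pins the
// dependency to one named managed object, while a *Selector finds a match
// by labels and records it as a Ref on resolution. A sketch with a
// hypothetical label:
//
//	pe := &PrivateEndpoint{}
//	pe.Spec.ForProvider.NetworkIDSelector = &v1.Selector{
//		MatchLabels: map[string]string{"team": "platform"},
//	}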
+	// +kubebuilder:validation:Optional
+	NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+	// +kubebuilder:validation:Optional
+	ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"`
+}
+
+// PrivateEndpointSpec defines the desired state of PrivateEndpoint
+type PrivateEndpointSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider PrivateEndpointParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider PrivateEndpointInitParameters `json:"initProvider,omitempty"`
+}
+
+// PrivateEndpointStatus defines the observed state of PrivateEndpoint.
+type PrivateEndpointStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider PrivateEndpointObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// PrivateEndpoint is the Schema for the PrivateEndpoints API. Manages a VPC Private Endpoint within Yandex.Cloud.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
type PrivateEndpoint struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.objectStorage) || (has(self.initProvider) && has(self.initProvider.objectStorage))",message="spec.forProvider.objectStorage is a required parameter"
+	Spec   PrivateEndpointSpec   `json:"spec"`
+	Status PrivateEndpointStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PrivateEndpointList contains a list of PrivateEndpoints
+type PrivateEndpointList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []PrivateEndpoint `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	PrivateEndpoint_Kind             = "PrivateEndpoint"
+	PrivateEndpoint_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: PrivateEndpoint_Kind}.String()
+	PrivateEndpoint_KindAPIVersion   = PrivateEndpoint_Kind + "." + CRDGroupVersion.String()
+	PrivateEndpoint_GroupVersionKind = CRDGroupVersion.WithKind(PrivateEndpoint_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&PrivateEndpoint{}, &PrivateEndpointList{})
+}
diff --git a/apis/vpc/v1alpha1/zz_routetable_terraformed.go b/apis/vpc/v1alpha1/zz_routetable_terraformed.go
new file mode 100755
index 0000000..310f76e
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_routetable_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this RouteTable
+func (mg *RouteTable) GetTerraformResourceType() string {
+	return "yandex_vpc_route_table"
+}
+
+// GetConnectionDetailsMapping for this RouteTable
+func (tr *RouteTable) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this RouteTable
+func (tr *RouteTable) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this RouteTable
+func (tr *RouteTable) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this RouteTable
+func (tr *RouteTable) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this RouteTable
+func (tr *RouteTable) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this RouteTable
+func (tr *RouteTable) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this RouteTable
+func (tr *RouteTable) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this RouteTable
+func (tr *RouteTable) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
diff --git a/apis/vpc/v1alpha1/zz_routetable_terraformed.go b/apis/vpc/v1alpha1/zz_routetable_terraformed.go
new file mode 100755
index 0000000..310f76e
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_routetable_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+    "dario.cat/mergo"
+    "github.com/pkg/errors"
+
+    "github.com/crossplane/upjet/pkg/resource"
+    "github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this RouteTable
+func (mg *RouteTable) GetTerraformResourceType() string {
+    return "yandex_vpc_route_table"
+}
+
+// GetConnectionDetailsMapping for this RouteTable
+func (tr *RouteTable) GetConnectionDetailsMapping() map[string]string {
+    return nil
+}
+
+// GetObservation of this RouteTable
+func (tr *RouteTable) GetObservation() (map[string]any, error) {
+    o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this RouteTable
+func (tr *RouteTable) SetObservation(obs map[string]any) error {
+    p, err := json.TFParser.Marshal(obs)
+    if err != nil {
+        return err
+    }
+    return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this RouteTable
+func (tr *RouteTable) GetID() string {
+    if tr.Status.AtProvider.ID == nil {
+        return ""
+    }
+    return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this RouteTable
+func (tr *RouteTable) GetParameters() (map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this RouteTable
+func (tr *RouteTable) SetParameters(params map[string]any) error {
+    p, err := json.TFParser.Marshal(params)
+    if err != nil {
+        return err
+    }
+    return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this RouteTable
+func (tr *RouteTable) GetInitParameters() (map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this RouteTable
+func (tr *RouteTable) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+    params, err := tr.GetParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+    }
+    if !shouldMergeInitProvider {
+        return params, nil
+    }
+
+    initParams, err := tr.GetInitParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+    }
+
+    // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+    // slices from the initProvider to forProvider. As it also sets
+    // overwrite to true, we need to set it back to false, we don't
+    // want to overwrite the forProvider fields with the initProvider
+    // fields.
+    err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+        c.Overwrite = false
+    })
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+    }
+
+    return params, nil
+}
+
+// LateInitialize this RouteTable using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *RouteTable) LateInitialize(attrs []byte) (bool, error) {
+    params := &RouteTableParameters{}
+    if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+        return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+    }
+    opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+    li := resource.NewGenericLateInitializer(opts...)
+    return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *RouteTable) GetTerraformSchemaVersion() int {
+    return 0
+}
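GetMergedParameters above merges the map built from spec.initProvider into the one built from spec.forProvider; resetting Overwrite to false keeps forProvider authoritative on conflicting keys. A self-contained sketch of those merge semantics, using hypothetical keys and the same mergo options as the generated code:

package main

import (
    "fmt"

    "dario.cat/mergo"
)

func main() {
    params := map[string]any{"name": "from-forProvider"}
    initParams := map[string]any{
        "name":   "from-initProvider",
        "labels": map[string]any{"env": "dev"},
    }

    // Same options as the generated code: WithSliceDeepCopy also flips
    // Overwrite to true, so it is reset to false to keep params winning.
    err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
        c.Overwrite = false
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(params) // map[labels:map[env:dev] name:from-forProvider]
}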
diff --git a/apis/vpc/v1alpha1/zz_routetable_types.go b/apis/vpc/v1alpha1/zz_routetable_types.go
new file mode 100755
index 0000000..5ae2209
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_routetable_types.go
@@ -0,0 +1,241 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+
+    v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type RouteTableInitParameters struct {
+
+    // An optional description of the route table. Provide this property when you create the resource.
+    Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+    // The ID of the folder to which the resource belongs. If omitted, the provider folder is used.
+    // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+    FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+    // Reference to a Folder in resourcemanager to populate folderId.
+    // +kubebuilder:validation:Optional
+    FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+    // Selector for a Folder in resourcemanager to populate folderId.
+    // +kubebuilder:validation:Optional
+    FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+    // Labels to assign to this route table. A list of key/value pairs.
+    // +mapType=granular
+    Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+    // Name of the route table. Provided by the client when the route table is created.
+    Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+    // ID of the network this route table belongs to.
+    // +crossplane:generate:reference:type=Network
+    NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+    // Reference to a Network to populate networkId.
+    // +kubebuilder:validation:Optional
+    NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+    // Selector for a Network to populate networkId.
+    // +kubebuilder:validation:Optional
+    NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+    // A list of static route records for the route table. The structure is documented below.
+    StaticRoute []StaticRouteInitParameters `json:"staticRoute,omitempty" tf:"static_route,omitempty"`
+}
+
+type RouteTableObservation struct {
+
+    // Creation timestamp of the route table.
+    CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+    // An optional description of the route table. Provide this property when you create the resource.
+    Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+    // The ID of the folder to which the resource belongs. If omitted, the provider folder is used.
+    FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+    ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+    // Labels to assign to this route table. A list of key/value pairs.
+    // +mapType=granular
+    Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+    // Name of the route table. Provided by the client when the route table is created.
+    Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+    // ID of the network this route table belongs to.
+    NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+    // A list of static route records for the route table. The structure is documented below.
+    StaticRoute []StaticRouteObservation `json:"staticRoute,omitempty" tf:"static_route,omitempty"`
+}
+
+type RouteTableParameters struct {
+
+    // An optional description of the route table. Provide this property when you create the resource.
+    // +kubebuilder:validation:Optional
+    Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+    // The ID of the folder to which the resource belongs. If omitted, the provider folder is used.
+    // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+    // +kubebuilder:validation:Optional
+    FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+    // Reference to a Folder in resourcemanager to populate folderId.
+    // +kubebuilder:validation:Optional
+    FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+    // Selector for a Folder in resourcemanager to populate folderId.
+    // +kubebuilder:validation:Optional
+    FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+    // Labels to assign to this route table. A list of key/value pairs.
+    // +kubebuilder:validation:Optional
+    // +mapType=granular
+    Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+    // Name of the route table. Provided by the client when the route table is created.
+    // +kubebuilder:validation:Optional
+    Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+    // ID of the network this route table belongs to.
+    // +crossplane:generate:reference:type=Network
+    // +kubebuilder:validation:Optional
+    NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"`
+
+    // Reference to a Network to populate networkId.
+    // +kubebuilder:validation:Optional
+    NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"`
+
+    // Selector for a Network to populate networkId.
+    // +kubebuilder:validation:Optional
+    NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+
+    // A list of static route records for the route table. The structure is documented below.
+    // +kubebuilder:validation:Optional
+    StaticRoute []StaticRouteParameters `json:"staticRoute,omitempty" tf:"static_route,omitempty"`
+}
+
+type StaticRouteInitParameters struct {
+
+    // Route prefix in CIDR notation.
+    DestinationPrefix *string `json:"destinationPrefix,omitempty" tf:"destination_prefix,omitempty"`
+
+    // ID of the gateway used as the next hop.
+    // +crossplane:generate:reference:type=Gateway
+    GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"`
+
+    // Reference to a Gateway to populate gatewayId.
+    // +kubebuilder:validation:Optional
+    GatewayIDRef *v1.Reference `json:"gatewayIdRef,omitempty" tf:"-"`
+
+    // Selector for a Gateway to populate gatewayId.
+    // +kubebuilder:validation:Optional
+    GatewayIDSelector *v1.Selector `json:"gatewayIdSelector,omitempty" tf:"-"`
+
+    // Address of the next hop.
+    NextHopAddress *string `json:"nextHopAddress,omitempty" tf:"next_hop_address,omitempty"`
+}
+
+type StaticRouteObservation struct {
+
+    // Route prefix in CIDR notation.
+    DestinationPrefix *string `json:"destinationPrefix,omitempty" tf:"destination_prefix,omitempty"`
+
+    // ID of the gateway used as the next hop.
+    GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"`
+
+    // Address of the next hop.
+    NextHopAddress *string `json:"nextHopAddress,omitempty" tf:"next_hop_address,omitempty"`
+}
+
+type StaticRouteParameters struct {
+
+    // Route prefix in CIDR notation.
+    // +kubebuilder:validation:Optional
+    DestinationPrefix *string `json:"destinationPrefix,omitempty" tf:"destination_prefix,omitempty"`
+
+    // ID of the gateway used as the next hop.
+    // +crossplane:generate:reference:type=Gateway
+    // +kubebuilder:validation:Optional
+    GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"`
+
+    // Reference to a Gateway to populate gatewayId.
+    // +kubebuilder:validation:Optional
+    GatewayIDRef *v1.Reference `json:"gatewayIdRef,omitempty" tf:"-"`
+
+    // Selector for a Gateway to populate gatewayId.
+    // +kubebuilder:validation:Optional
+    GatewayIDSelector *v1.Selector `json:"gatewayIdSelector,omitempty" tf:"-"`
+
+    // Address of the next hop.
+    // +kubebuilder:validation:Optional
+    NextHopAddress *string `json:"nextHopAddress,omitempty" tf:"next_hop_address,omitempty"`
+}
+
+// RouteTableSpec defines the desired state of RouteTable
+type RouteTableSpec struct {
+    v1.ResourceSpec `json:",inline"`
+    ForProvider     RouteTableParameters `json:"forProvider"`
+    // THIS IS A BETA FIELD. It will be honored
+    // unless the Management Policies feature flag is disabled.
+    // InitProvider holds the same fields as ForProvider, with the exception
+    // of Identifier and other resource reference fields. The fields that are
+    // in InitProvider are merged into ForProvider when the resource is created.
+    // The same fields are also added to the terraform ignore_changes hook, to
+    // avoid updating them after creation. This is useful for fields that are
+    // required on creation, but we do not desire to update them after creation,
+    // for example because an external controller is managing them, like an
+    // autoscaler.
+    InitProvider RouteTableInitParameters `json:"initProvider,omitempty"`
+}
+
+// RouteTableStatus defines the observed state of RouteTable.
+type RouteTableStatus struct {
+    v1.ResourceStatus `json:",inline"`
+    AtProvider        RouteTableObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// RouteTable is the Schema for the RouteTables API. A VPC route table is a virtual version of the traditional route table on a router device.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type RouteTable struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec RouteTableSpec `json:"spec"` + Status RouteTableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RouteTableList contains a list of RouteTables +type RouteTableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RouteTable `json:"items"` +} + +// Repository type metadata. +var ( + RouteTable_Kind = "RouteTable" + RouteTable_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RouteTable_Kind}.String() + RouteTable_KindAPIVersion = RouteTable_Kind + "." + CRDGroupVersion.String() + RouteTable_GroupVersionKind = CRDGroupVersion.WithKind(RouteTable_Kind) +) + +func init() { + SchemeBuilder.Register(&RouteTable{}, &RouteTableList{}) +} diff --git a/apis/vpc/v1alpha1/zz_securitygroup_terraformed.go b/apis/vpc/v1alpha1/zz_securitygroup_terraformed.go new file mode 100755 index 0000000..e251c72 --- /dev/null +++ b/apis/vpc/v1alpha1/zz_securitygroup_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SecurityGroup +func (mg *SecurityGroup) GetTerraformResourceType() string { + return "yandex_vpc_security_group" +} + +// GetConnectionDetailsMapping for this SecurityGroup +func (tr *SecurityGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SecurityGroup +func (tr *SecurityGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SecurityGroup +func (tr *SecurityGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SecurityGroup +func (tr *SecurityGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SecurityGroup +func (tr *SecurityGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SecurityGroup +func (tr *SecurityGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SecurityGroup +func (tr *SecurityGroup) GetInitParameters() 
(map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SecurityGroup
+func (tr *SecurityGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+    params, err := tr.GetParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+    }
+    if !shouldMergeInitProvider {
+        return params, nil
+    }
+
+    initParams, err := tr.GetInitParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+    }
+
+    // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+    // slices from the initProvider to forProvider. As it also sets
+    // overwrite to true, we need to set it back to false, we don't
+    // want to overwrite the forProvider fields with the initProvider
+    // fields.
+    err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+        c.Overwrite = false
+    })
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+    }
+
+    return params, nil
+}
+
+// LateInitialize this SecurityGroup using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SecurityGroup) LateInitialize(attrs []byte) (bool, error) {
+    params := &SecurityGroupParameters{}
+    if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+        return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+    }
+    opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+    li := resource.NewGenericLateInitializer(opts...)
+    return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SecurityGroup) GetTerraformSchemaVersion() int {
+    return 0
+}
diff --git a/apis/vpc/v1alpha1/zz_securitygroup_types.go b/apis/vpc/v1alpha1/zz_securitygroup_types.go
new file mode 100755
index 0000000..ee7fa03
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_securitygroup_types.go
@@ -0,0 +1,428 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+
+    v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type SecurityGroupEgressInitParameters struct {
+
+    // Description of the rule.
+    Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+    // Minimum port number.
+    FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"`
+
+    // Labels to assign to this security group.
+    // +mapType=granular
+    Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+    // Port number (if applied to a single port).
+    Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+
+    // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes.
+    PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"`
+
+    // One of ANY, TCP, UDP, ICMP, IPV6_ICMP.
+    Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"`
+
+    // Target security group ID for this rule.
+ SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupEgressObservation struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Id of the rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupEgressParameters struct { + + // Description of the rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this security group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + // +kubebuilder:validation:Optional + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + // +kubebuilder:validation:Optional + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. 
+ // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + // +kubebuilder:validation:Optional + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + // +kubebuilder:validation:Optional + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupIngressInitParameters struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this rule. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupIngressObservation struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Id of the rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this rule. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. 
+ V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupIngressParameters struct { + + // Description of the rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Minimum port number. + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this rule. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets. self_security_group refers to this particular security group. loadbalancer_healthchecks represents loadbalancer health check nodes. + // +kubebuilder:validation:Optional + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Target security group ID for this rule. + // +kubebuilder:validation:Optional + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + // +kubebuilder:validation:Optional + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + // +kubebuilder:validation:Optional + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupInitParameters struct { + + // Description of the security group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of egress rules. The structure is documented below. + Egress []SecurityGroupEgressInitParameters `json:"egress,omitempty" tf:"egress,omitempty"` + + // ID of the folder this security group belongs to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of ingress rules. + Ingress []SecurityGroupIngressInitParameters `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the security group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this security group belongs to. + // +crossplane:generate:reference:type=Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network to populate networkId. 
+ // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` +} + +type SecurityGroupObservation struct { + + // Creation timestamp of this security group. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the security group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of egress rules. The structure is documented below. + Egress []SecurityGroupEgressObservation `json:"egress,omitempty" tf:"egress,omitempty"` + + // ID of the folder this security group belongs to. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Id of the rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of ingress rules. + Ingress []SecurityGroupIngressObservation `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // Labels to assign to this security group. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the security group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this security group belongs to. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Status of this security group. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type SecurityGroupParameters struct { + + // Description of the security group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of egress rules. The structure is documented below. + // +kubebuilder:validation:Optional + Egress []SecurityGroupEgressParameters `json:"egress,omitempty" tf:"egress,omitempty"` + + // ID of the folder this security group belongs to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of ingress rules. + // +kubebuilder:validation:Optional + Ingress []SecurityGroupIngressParameters `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // Labels to assign to this security group. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the security group. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this security group belongs to. + // +crossplane:generate:reference:type=Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network to populate networkId. 
+    // +kubebuilder:validation:Optional
+    NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"`
+}
+
+// SecurityGroupSpec defines the desired state of SecurityGroup
+type SecurityGroupSpec struct {
+    v1.ResourceSpec `json:",inline"`
+    ForProvider     SecurityGroupParameters `json:"forProvider"`
+    // THIS IS A BETA FIELD. It will be honored
+    // unless the Management Policies feature flag is disabled.
+    // InitProvider holds the same fields as ForProvider, with the exception
+    // of Identifier and other resource reference fields. The fields that are
+    // in InitProvider are merged into ForProvider when the resource is created.
+    // The same fields are also added to the terraform ignore_changes hook, to
+    // avoid updating them after creation. This is useful for fields that are
+    // required on creation, but we do not desire to update them after creation,
+    // for example because an external controller is managing them, like an
+    // autoscaler.
+    InitProvider SecurityGroupInitParameters `json:"initProvider,omitempty"`
+}
+
+// SecurityGroupStatus defines the observed state of SecurityGroup.
+type SecurityGroupStatus struct {
+    v1.ResourceStatus `json:",inline"`
+    AtProvider        SecurityGroupObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// SecurityGroup is the Schema for the SecurityGroups API. Yandex VPC Security Group.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type SecurityGroup struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+    Spec              SecurityGroupSpec   `json:"spec"`
+    Status            SecurityGroupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SecurityGroupList contains a list of SecurityGroups
+type SecurityGroupList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items           []SecurityGroup `json:"items"`
+}
+
+// Repository type metadata.
+var (
+    SecurityGroup_Kind             = "SecurityGroup"
+    SecurityGroup_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: SecurityGroup_Kind}.String()
+    SecurityGroup_KindAPIVersion   = SecurityGroup_Kind + "." + CRDGroupVersion.String()
+    SecurityGroup_GroupVersionKind = CRDGroupVersion.WithKind(SecurityGroup_Kind)
+)
+
+func init() {
+    SchemeBuilder.Register(&SecurityGroup{}, &SecurityGroupList{})
+}
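In the egress and ingress rule parameter blocks above, protocol is the one field whose json tag has no omitempty marker, which is how the generated schema encodes that every rule must name a protocol. A rough sketch of constructing a SecurityGroup against these types (the concrete values are illustrative):

package main

import (
    "fmt"

    vpcv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1"
)

func main() {
    protocol := "TCP"
    port := float64(443)
    cidr := "0.0.0.0/0"

    // Every rule block needs Protocol; the other fields stay optional.
    sg := vpcv1alpha1.SecurityGroup{
        Spec: vpcv1alpha1.SecurityGroupSpec{
            ForProvider: vpcv1alpha1.SecurityGroupParameters{
                Ingress: []vpcv1alpha1.SecurityGroupIngressParameters{{
                    Protocol:     &protocol,
                    Port:         &port,
                    V4CidrBlocks: []*string{&cidr},
                }},
            },
        },
    }
    fmt.Println(*sg.Spec.ForProvider.Ingress[0].Protocol) // TCP
}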
diff --git a/apis/vpc/v1alpha1/zz_securitygrouprule_terraformed.go b/apis/vpc/v1alpha1/zz_securitygrouprule_terraformed.go
new file mode 100755
index 0000000..de972cf
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_securitygrouprule_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+    "dario.cat/mergo"
+    "github.com/pkg/errors"
+
+    "github.com/crossplane/upjet/pkg/resource"
+    "github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this SecurityGroupRule
+func (mg *SecurityGroupRule) GetTerraformResourceType() string {
+    return "yandex_vpc_security_group_rule"
+}
+
+// GetConnectionDetailsMapping for this SecurityGroupRule
+func (tr *SecurityGroupRule) GetConnectionDetailsMapping() map[string]string {
+    return nil
+}
+
+// GetObservation of this SecurityGroupRule
+func (tr *SecurityGroupRule) GetObservation() (map[string]any, error) {
+    o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this SecurityGroupRule
+func (tr *SecurityGroupRule) SetObservation(obs map[string]any) error {
+    p, err := json.TFParser.Marshal(obs)
+    if err != nil {
+        return err
+    }
+    return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SecurityGroupRule
+func (tr *SecurityGroupRule) GetID() string {
+    if tr.Status.AtProvider.ID == nil {
+        return ""
+    }
+    return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SecurityGroupRule
+func (tr *SecurityGroupRule) GetParameters() (map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SecurityGroupRule
+func (tr *SecurityGroupRule) SetParameters(params map[string]any) error {
+    p, err := json.TFParser.Marshal(params)
+    if err != nil {
+        return err
+    }
+    return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SecurityGroupRule
+func (tr *SecurityGroupRule) GetInitParameters() (map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SecurityGroupRule
+func (tr *SecurityGroupRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+    params, err := tr.GetParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+    }
+    if !shouldMergeInitProvider {
+        return params, nil
+    }
+
+    initParams, err := tr.GetInitParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+    }
+
+    // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+    // slices from the initProvider to forProvider. As it also sets
+    // overwrite to true, we need to set it back to false, we don't
+    // want to overwrite the forProvider fields with the initProvider
+    // fields.
+    err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+        c.Overwrite = false
+    })
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+    }
+
+    return params, nil
+}
+
+// LateInitialize this SecurityGroupRule using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SecurityGroupRule) LateInitialize(attrs []byte) (bool, error) { + params := &SecurityGroupRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SecurityGroupRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/vpc/v1alpha1/zz_securitygrouprule_types.go b/apis/vpc/v1alpha1/zz_securitygrouprule_types.go new file mode 100755 index 0000000..96120ae --- /dev/null +++ b/apis/vpc/v1alpha1/zz_securitygrouprule_types.go @@ -0,0 +1,225 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SecurityGroupRuleInitParameters struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // direction of the rule. Can be ingress (inbound) or egress (outbound). + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this rule. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets such as "self_security_group". See docs for possible options. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // ID of the security group this rule belongs to. + // +crossplane:generate:reference:type=SecurityGroup + SecurityGroupBinding *string `json:"securityGroupBinding,omitempty" tf:"security_group_binding,omitempty"` + + // Reference to a SecurityGroup to populate securityGroupBinding. + // +kubebuilder:validation:Optional + SecurityGroupBindingRef *v1.Reference `json:"securityGroupBindingRef,omitempty" tf:"-"` + + // Selector for a SecurityGroup to populate securityGroupBinding. + // +kubebuilder:validation:Optional + SecurityGroupBindingSelector *v1.Selector `json:"securityGroupBindingSelector,omitempty" tf:"-"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupRuleObservation struct { + + // Description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // direction of the rule. 
Can be ingress (inbound) or egress (outbound). + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // Minimum port number. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Id of the rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this rule. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets such as "self_security_group". See docs for possible options. + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // ID of the security group this rule belongs to. + SecurityGroupBinding *string `json:"securityGroupBinding,omitempty" tf:"security_group_binding,omitempty"` + + // Target security group ID for this rule. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // Maximum port number. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` + + // The blocks of IPv4 addresses for this rule. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` +} + +type SecurityGroupRuleParameters struct { + + // Description of the rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // direction of the rule. Can be ingress (inbound) or egress (outbound). + // +kubebuilder:validation:Optional + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // Minimum port number. + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Labels to assign to this rule. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Port number (if applied to a single port). + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Special-purpose targets such as "self_security_group". See docs for possible options. + // +kubebuilder:validation:Optional + PredefinedTarget *string `json:"predefinedTarget,omitempty" tf:"predefined_target,omitempty"` + + // One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // ID of the security group this rule belongs to. + // +crossplane:generate:reference:type=SecurityGroup + // +kubebuilder:validation:Optional + SecurityGroupBinding *string `json:"securityGroupBinding,omitempty" tf:"security_group_binding,omitempty"` + + // Reference to a SecurityGroup to populate securityGroupBinding. + // +kubebuilder:validation:Optional + SecurityGroupBindingRef *v1.Reference `json:"securityGroupBindingRef,omitempty" tf:"-"` + + // Selector for a SecurityGroup to populate securityGroupBinding. + // +kubebuilder:validation:Optional + SecurityGroupBindingSelector *v1.Selector `json:"securityGroupBindingSelector,omitempty" tf:"-"` + + // Target security group ID for this rule. 
+    // +kubebuilder:validation:Optional
+    SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"`
+
+    // Maximum port number.
+    // +kubebuilder:validation:Optional
+    ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"`
+
+    // The blocks of IPv4 addresses for this rule.
+    // +kubebuilder:validation:Optional
+    V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"`
+
+    // The blocks of IPv6 addresses for this rule. v6_cidr_blocks argument is currently not supported. It will be available in the future.
+    // +kubebuilder:validation:Optional
+    V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"`
+}
+
+// SecurityGroupRuleSpec defines the desired state of SecurityGroupRule
+type SecurityGroupRuleSpec struct {
+    v1.ResourceSpec `json:",inline"`
+    ForProvider     SecurityGroupRuleParameters `json:"forProvider"`
+    // THIS IS A BETA FIELD. It will be honored
+    // unless the Management Policies feature flag is disabled.
+    // InitProvider holds the same fields as ForProvider, with the exception
+    // of Identifier and other resource reference fields. The fields that are
+    // in InitProvider are merged into ForProvider when the resource is created.
+    // The same fields are also added to the terraform ignore_changes hook, to
+    // avoid updating them after creation. This is useful for fields that are
+    // required on creation, but we do not desire to update them after creation,
+    // for example because an external controller is managing them, like an
+    // autoscaler.
+    InitProvider SecurityGroupRuleInitParameters `json:"initProvider,omitempty"`
+}
+
+// SecurityGroupRuleStatus defines the observed state of SecurityGroupRule.
+type SecurityGroupRuleStatus struct {
+    v1.ResourceStatus `json:",inline"`
+    AtProvider        SecurityGroupRuleObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// SecurityGroupRule is the Schema for the SecurityGroupRules API. Yandex VPC Security Group Rule.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type SecurityGroupRule struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+    // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.direction) || (has(self.initProvider) && has(self.initProvider.direction))",message="spec.forProvider.direction is a required parameter"
+    Spec   SecurityGroupRuleSpec   `json:"spec"`
+    Status SecurityGroupRuleStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SecurityGroupRuleList contains a list of SecurityGroupRules
+type SecurityGroupRuleList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items           []SecurityGroupRule `json:"items"`
+}
+
+// Repository type metadata.
+var (
+    SecurityGroupRule_Kind             = "SecurityGroupRule"
+    SecurityGroupRule_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: SecurityGroupRule_Kind}.String()
+    SecurityGroupRule_KindAPIVersion   = SecurityGroupRule_Kind + "." + CRDGroupVersion.String()
+    SecurityGroupRule_GroupVersionKind = CRDGroupVersion.WithKind(SecurityGroupRule_Kind)
+)
+
+func init() {
+    SchemeBuilder.Register(&SecurityGroupRule{}, &SecurityGroupRuleList{})
+}
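Mirroring the objectStorage rule on PrivateEndpoint, the CEL expression on SecurityGroupRule above requires direction whenever the resource can be created or updated. A sketch of a rule that passes that validation and resolves its parent group through securityGroupBindingRef instead of a literal ID (the referenced name is illustrative):

package main

import (
    "fmt"

    v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
    vpcv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1"
)

func main() {
    direction := "ingress"
    port := float64(22)
    cidr := "10.0.0.0/8"

    rule := vpcv1alpha1.SecurityGroupRule{
        Spec: vpcv1alpha1.SecurityGroupRuleSpec{
            ForProvider: vpcv1alpha1.SecurityGroupRuleParameters{
                Direction:    &direction, // satisfies the CEL rule above
                Port:         &port,
                V4CidrBlocks: []*string{&cidr},
                // Resolved to security_group_binding by the reference resolver.
                SecurityGroupBindingRef: &v1.Reference{Name: "my-security-group"},
            },
        },
    }
    fmt.Println(*rule.Spec.ForProvider.Direction) // ingress
}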
diff --git a/apis/vpc/v1alpha1/zz_subnet_terraformed.go b/apis/vpc/v1alpha1/zz_subnet_terraformed.go
new file mode 100755
index 0000000..b9617eb
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_subnet_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+    "dario.cat/mergo"
+    "github.com/pkg/errors"
+
+    "github.com/crossplane/upjet/pkg/resource"
+    "github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Subnet
+func (mg *Subnet) GetTerraformResourceType() string {
+    return "yandex_vpc_subnet"
+}
+
+// GetConnectionDetailsMapping for this Subnet
+func (tr *Subnet) GetConnectionDetailsMapping() map[string]string {
+    return nil
+}
+
+// GetObservation of this Subnet
+func (tr *Subnet) GetObservation() (map[string]any, error) {
+    o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Subnet
+func (tr *Subnet) SetObservation(obs map[string]any) error {
+    p, err := json.TFParser.Marshal(obs)
+    if err != nil {
+        return err
+    }
+    return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Subnet
+func (tr *Subnet) GetID() string {
+    if tr.Status.AtProvider.ID == nil {
+        return ""
+    }
+    return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Subnet
+func (tr *Subnet) GetParameters() (map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Subnet
+func (tr *Subnet) SetParameters(params map[string]any) error {
+    p, err := json.TFParser.Marshal(params)
+    if err != nil {
+        return err
+    }
+    return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Subnet
+func (tr *Subnet) GetInitParameters() (map[string]any, error) {
+    p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+    if err != nil {
+        return nil, err
+    }
+    base := map[string]any{}
+    return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Subnet
+func (tr *Subnet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+    params, err := tr.GetParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+    }
+    if !shouldMergeInitProvider {
+        return params, nil
+    }
+
+    initParams, err := tr.GetInitParameters()
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+    }
+
+    // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+    // slices from the initProvider to forProvider. As it also sets
+    // overwrite to true, we need to set it back to false, we don't
+    // want to overwrite the forProvider fields with the initProvider
+    // fields.
+    err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+        c.Overwrite = false
+    })
+    if err != nil {
+        return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+    }
+
+    return params, nil
+}
+
+// LateInitialize this Subnet using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Subnet) LateInitialize(attrs []byte) (bool, error) {
+    params := &SubnetParameters{}
+    if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+        return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+    }
+    opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+    li := resource.NewGenericLateInitializer(opts...)
+    return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Subnet) GetTerraformSchemaVersion() int {
+    return 0
+}
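LateInitialize above copies values from the observed Terraform state into spec.forProvider, but only into fields that are still nil, so user-specified values survive. A sketch of that behavior with an illustrative state document (the folder ID shown is made up):

package main

import (
    "fmt"

    vpcv1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1"
)

func main() {
    name := "demo"
    sn := vpcv1alpha1.Subnet{}
    sn.Spec.ForProvider.Name = &name // already set by the user: must be kept

    // Observed tfState: name conflicts with the spec, folder_id is new.
    attrs := []byte(`{"name":"observed-name","folder_id":"b1gexample"}`)

    changed, err := sn.LateInitialize(attrs)
    if err != nil {
        panic(err)
    }
    // Only FolderID was late-initialized, so a spec change is reported.
    fmt.Println(changed, *sn.Spec.ForProvider.FolderID, *sn.Spec.ForProvider.Name)
    // Output: true b1gexample demo
}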
diff --git a/apis/vpc/v1alpha1/zz_subnet_types.go b/apis/vpc/v1alpha1/zz_subnet_types.go
new file mode 100755
index 0000000..ffaa1d5
--- /dev/null
+++ b/apis/vpc/v1alpha1/zz_subnet_types.go
@@ -0,0 +1,255 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+
+    v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type DHCPOptionsInitParameters struct {
+
+    // Domain name.
+    DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"`
+
+    // Domain name server IP addresses.
+    DomainNameServers []*string `json:"domainNameServers,omitempty" tf:"domain_name_servers,omitempty"`
+
+    // NTP server IP addresses.
+    NtpServers []*string `json:"ntpServers,omitempty" tf:"ntp_servers,omitempty"`
+}
+
+type DHCPOptionsObservation struct {
+
+    // Domain name.
+    DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"`
+
+    // Domain name server IP addresses.
+    DomainNameServers []*string `json:"domainNameServers,omitempty" tf:"domain_name_servers,omitempty"`
+
+    // NTP server IP addresses.
+    NtpServers []*string `json:"ntpServers,omitempty" tf:"ntp_servers,omitempty"`
+}
+
+type DHCPOptionsParameters struct {
+
+    // Domain name.
+    // +kubebuilder:validation:Optional
+    DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"`
+
+    // Domain name server IP addresses.
+    // +kubebuilder:validation:Optional
+    DomainNameServers []*string `json:"domainNameServers,omitempty" tf:"domain_name_servers,omitempty"`
+
+    // NTP server IP addresses.
+    // +kubebuilder:validation:Optional
+    NtpServers []*string `json:"ntpServers,omitempty" tf:"ntp_servers,omitempty"`
+}
+
+type SubnetInitParameters struct {
+
+    // Options for DHCP client. The structure is documented below.
+    DHCPOptions []DHCPOptionsInitParameters `json:"dhcpOptions,omitempty" tf:"dhcp_options,omitempty"`
+
+    // An optional description of the subnet. Provide this property when you create the resource.
+    Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+    // The ID of the folder to which the resource belongs. If omitted, the provider folder is used.
+    // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+    FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+    // Reference to a Folder in resourcemanager to populate folderId.
+ // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to assign to this subnet. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the subnet. Provided by the client when the subnet is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this subnet belongs to. Only networks that are in the distributed mode can have subnets. + // +crossplane:generate:reference:type=Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // The ID of the route table to assign to this subnet. Assigned route table should belong to the same network as this subnet. + RouteTableID *string `json:"routeTableId,omitempty" tf:"route_table_id,omitempty"` + + // A list of blocks of internal IPv4 addresses that are owned by this subnet. Provide this property when you create the subnet. For example, 10.0.0.0/22 or 192.168.0.0/16. Blocks of addresses must be unique and non-overlapping within a network. Minimum subnet size is /28, and maximum subnet size is /16. Only IPv4 is supported. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // Name of the Yandex.Cloud zone for this subnet. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type SubnetObservation struct { + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Options for DHCP client. The structure is documented below. + DHCPOptions []DHCPOptionsObservation `json:"dhcpOptions,omitempty" tf:"dhcp_options,omitempty"` + + // An optional description of the subnet. Provide this property when you create the resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Labels to assign to this subnet. A list of key/value pairs. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the subnet. Provided by the client when the subnet is created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this subnet belongs to. Only networks that are in the distributed mode can have subnets. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // The ID of the route table to assign to this subnet. Assigned route table should belong to the same network as this subnet. + RouteTableID *string `json:"routeTableId,omitempty" tf:"route_table_id,omitempty"` + + // A list of blocks of internal IPv4 addresses that are owned by this subnet. Provide this property when you create the subnet. For example, 10.0.0.0/22 or 192.168.0.0/16. Blocks of addresses must be unique and non-overlapping within a network. 
Minimum subnet size is /28, and maximum subnet size is /16. Only IPv4 is supported. + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // An optional list of blocks of IPv6 addresses that are owned by this subnet. + V6CidrBlocks []*string `json:"v6CidrBlocks,omitempty" tf:"v6_cidr_blocks,omitempty"` + + // Name of the Yandex.Cloud zone for this subnet. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type SubnetParameters struct { + + // Options for DHCP client. The structure is documented below. + // +kubebuilder:validation:Optional + DHCPOptions []DHCPOptionsParameters `json:"dhcpOptions,omitempty" tf:"dhcp_options,omitempty"` + + // An optional description of the subnet. Provide this property when you create the resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the folder to which the resource belongs. If omitted, the provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to assign to this subnet. A list of key/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of the subnet. Provided by the client when the subnet is created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network this subnet belongs to. Only networks that are in the distributed mode can have subnets. + // +crossplane:generate:reference:type=Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // The ID of the route table to assign to this subnet. Assigned route table should belong to the same network as this subnet. + // +kubebuilder:validation:Optional + RouteTableID *string `json:"routeTableId,omitempty" tf:"route_table_id,omitempty"` + + // A list of blocks of internal IPv4 addresses that are owned by this subnet. Provide this property when you create the subnet. For example, 10.0.0.0/22 or 192.168.0.0/16. Blocks of addresses must be unique and non-overlapping within a network. Minimum subnet size is /28, and maximum subnet size is /16. Only IPv4 is supported. + // +kubebuilder:validation:Optional + V4CidrBlocks []*string `json:"v4CidrBlocks,omitempty" tf:"v4_cidr_blocks,omitempty"` + + // Name of the Yandex.Cloud zone for this subnet. 
+ // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// SubnetSpec defines the desired state of Subnet +type SubnetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubnetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider SubnetInitParameters `json:"initProvider,omitempty"` +} + +// SubnetStatus defines the observed state of Subnet. +type SubnetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubnetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Subnet is the Schema for the Subnets API. A VPC network is a virtual version of the traditional physical networks that exist within and between physical data centers. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Subnet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.v4CidrBlocks) || (has(self.initProvider) && has(self.initProvider.v4CidrBlocks))",message="spec.forProvider.v4CidrBlocks is a required parameter" + Spec SubnetSpec `json:"spec"` + Status SubnetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubnetList contains a list of Subnets +type SubnetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Subnet `json:"items"` +} + +// Repository type metadata. +var ( + Subnet_Kind = "Subnet" + Subnet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Subnet_Kind}.String() + Subnet_KindAPIVersion = Subnet_Kind + "." + CRDGroupVersion.String() + Subnet_GroupVersionKind = CRDGroupVersion.WithKind(Subnet_Kind) +) + +func init() { + SchemeBuilder.Register(&Subnet{}, &SubnetList{}) +} diff --git a/apis/yandex/v1alpha1/zz_function_terraformed.go b/apis/yandex/v1alpha1/zz_function_terraformed.go index 08f7dea..2094ac3 100755 --- a/apis/yandex/v1alpha1/zz_function_terraformed.go +++ b/apis/yandex/v1alpha1/zz_function_terraformed.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT.
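The Subnet types above assemble into a cluster-scoped managed resource. A minimal, hypothetical sketch of constructing one in Go, assuming the github.com/tagesjump/provider-upjet-yc module path that appears in the reference markers; the network ID literal is a placeholder, and in practice networkIdRef or networkIdSelector would resolve it from a managed Network:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1"
)

func ptr(s string) *string { return &s }

func main() {
	subnet := &v1alpha1.Subnet{
		ObjectMeta: metav1.ObjectMeta{Name: "example-subnet"},
		Spec: v1alpha1.SubnetSpec{
			ForProvider: v1alpha1.SubnetParameters{
				// Placeholder ID; a networkIdRef or networkIdSelector would
				// normally populate this from a referenced Network object.
				NetworkID: ptr("enp0examplenetwork"),
				Zone:      ptr("ru-central1-a"),
				// The XValidation rule on the Subnet type requires v4CidrBlocks
				// here or in spec.initProvider whenever the Create or Update
				// management policies are in effect.
				V4CidrBlocks: []*string{ptr("10.1.0.0/24")},
			},
		},
	}
	fmt.Println(subnet.Name, *subnet.Spec.ForProvider.Zone)
}

Fields set only in spec.initProvider are merged into forProvider at creation and then ignored on updates, as the InitProvider comment above describes.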
package v1alpha1 @@ -10,120 +8,118 @@ import ( "github.com/crossplane/upjet/pkg/resource" "github.com/crossplane/upjet/pkg/resource/json" - ) // GetTerraformResourceType returns Terraform resource type for this Function func (mg *Function) GetTerraformResourceType() string { - return "yandex_function" + return "yandex_function" } // GetConnectionDetailsMapping for this Function func (tr *Function) GetConnectionDetailsMapping() map[string]string { - return nil + return nil } // GetObservation of this Function func (tr *Function) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) } // SetObservation for this Function func (tr *Function) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) } // GetID returns ID of underlying Terraform resource of this Function func (tr *Function) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID } // GetParameters of this Function func (tr *Function) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // SetParameters for this Function func (tr *Function) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) } // GetInitParameters of this Function func (tr *Function) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) } // GetMergedParameters of this Function func (tr *Function) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider.
As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. - err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil } // LateInitialize this Function using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *Function) LateInitialize(attrs []byte) (bool, error) { - params := &FunctionParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) + params := &FunctionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) } // GetTerraformSchemaVersion returns the associated Terraform schema version func (tr *Function) GetTerraformSchemaVersion() int { - return 1 + return 1 } diff --git a/apis/yandex/v1alpha1/zz_function_types.go b/apis/yandex/v1alpha1/zz_function_types.go index 33fcf8c..20ee606 100755 --- a/apis/yandex/v1alpha1/zz_function_types.go +++ b/apis/yandex/v1alpha1/zz_function_types.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
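The Note(lsviben) comment in GetMergedParameters above captures the subtle part of the merge: mergo.WithSliceDeepCopy makes slice elements from initProvider merge into the matching forProvider elements instead of being dropped, but it also flips mergo's Overwrite flag on, so the trailing option function turns it back off and values already set in forProvider always win. A standalone sketch of that behavior on map-shaped parameters (the shape GetParameters and GetInitParameters return), assuming the github.com/imdario/mergo import path commonly used with upjet; all field values below are illustrative only:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// Plays the role of GetParameters(): spec.forProvider flattened to a map.
	params := map[string]any{
		"memory":  128.0,
		"content": []any{map[string]any{"zip_filename": "handler.zip"}},
	}
	// Plays the role of GetInitParameters(): spec.initProvider as a map.
	initParams := map[string]any{
		"runtime": "python312",
		"content": []any{map[string]any{"sha_256": "abc123"}},
	}

	// Same call shape as the generated GetMergedParameters.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}

	// memory keeps its forProvider value, runtime is filled in from
	// initProvider, and the content elements merge key by key.
	fmt.Println(params["memory"], params["runtime"], params["content"])
}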
package v1alpha1 @@ -9,859 +7,778 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - ) - - - type AsyncInvocationInitParameters struct { + // Maximum number of retries for async invocation + RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` -// Maximum number of retries for async invocation -RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` + // Service account used for async invocation + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Service account used for async invocation -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Target for unsuccessful async invocation + YmqFailureTarget []YmqFailureTargetInitParameters `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` -// Target for unsuccessful async invocation -YmqFailureTarget []YmqFailureTargetInitParameters `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` - -// Target for successful async invocation -YmqSuccessTarget []YmqSuccessTargetInitParameters `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` + // Target for successful async invocation + YmqSuccessTarget []YmqSuccessTargetInitParameters `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` } - type AsyncInvocationObservation struct { + // Maximum number of retries for async invocation + RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` -// Maximum number of retries for async invocation -RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` - -// Service account used for async invocation -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account used for async invocation + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Target for unsuccessful async invocation -YmqFailureTarget []YmqFailureTargetObservation `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` + // Target for unsuccessful async invocation + YmqFailureTarget []YmqFailureTargetObservation `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` -// Target for successful async invocation -YmqSuccessTarget []YmqSuccessTargetObservation `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` + // Target for successful async invocation + YmqSuccessTarget []YmqSuccessTargetObservation `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` } - type AsyncInvocationParameters struct { + // Maximum number of retries for async invocation + // +kubebuilder:validation:Optional + RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` -// Maximum number of retries for async invocation -// +kubebuilder:validation:Optional -RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` - -// Service account used for async invocation -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account used for async invocation + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Target for unsuccessful async invocation -// +kubebuilder:validation:Optional -YmqFailureTarget 
[]YmqFailureTargetParameters `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` + // Target for unsuccessful async invocation + // +kubebuilder:validation:Optional + YmqFailureTarget []YmqFailureTargetParameters `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` -// Target for successful async invocation -// +kubebuilder:validation:Optional -YmqSuccessTarget []YmqSuccessTargetParameters `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` + // Target for successful async invocation + // +kubebuilder:validation:Optional + YmqSuccessTarget []YmqSuccessTargetParameters `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` } - type ConnectivityInitParameters struct { - -// Network the version will have access to. It's essential to specify network with subnets in all availability zones -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Network the version will have access to. It's essential to specify network with subnets in all availability zones + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` } - type ConnectivityObservation struct { - -// Network the version will have access to. It's essential to specify network with subnets in all availability zones -NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + // Network the version will have access to. It's essential to specify network with subnets in all availability zones + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` } - type ConnectivityParameters struct { - -// Network the version will have access to. It's essential to specify network with subnets in all availability zones -// +kubebuilder:validation:Optional -NetworkID *string `json:"networkId" tf:"network_id,omitempty"` + // Network the version will have access to. 
It's essential to specify network with subnets in all availability zones + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId" tf:"network_id,omitempty"` } - type ContentInitParameters struct { - -// Filename to zip archive for the version -ZipFilename *string `json:"zipFilename,omitempty" tf:"zip_filename,omitempty"` + // Filename to zip archive for the version + ZipFilename *string `json:"zipFilename,omitempty" tf:"zip_filename,omitempty"` } - type ContentObservation struct { - -// Filename to zip archive for the version -ZipFilename *string `json:"zipFilename,omitempty" tf:"zip_filename,omitempty"` + // Filename to zip archive for the version + ZipFilename *string `json:"zipFilename,omitempty" tf:"zip_filename,omitempty"` } - type ContentParameters struct { - -// Filename to zip archive for the version -// +kubebuilder:validation:Optional -ZipFilename *string `json:"zipFilename" tf:"zip_filename,omitempty"` + // Filename to zip archive for the version + // +kubebuilder:validation:Optional + ZipFilename *string `json:"zipFilename" tf:"zip_filename,omitempty"` } - type EphemeralDiskInitParameters struct { + // Optional block size of the ephemeral disk in KB + BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` -// Optional block size of the ephemeral disk in KB -BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` - -// Size of the ephemeral disk in GB -SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` + // Size of the ephemeral disk in GB + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` } - type EphemeralDiskObservation struct { + // Optional block size of the ephemeral disk in KB + BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` -// Optional block size of the ephemeral disk in KB -BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` - -// Size of the ephemeral disk in GB -SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` + // Size of the ephemeral disk in GB + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` } - type EphemeralDiskParameters struct { + // Optional block size of the ephemeral disk in KB + // +kubebuilder:validation:Optional + BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` -// Optional block size of the ephemeral disk in KB -// +kubebuilder:validation:Optional -BlockSizeKb *float64 `json:"blockSizeKb,omitempty" tf:"block_size_kb,omitempty"` - -// Size of the ephemeral disk in GB -// +kubebuilder:validation:Optional -SizeGb *float64 `json:"sizeGb" tf:"size_gb,omitempty"` + // Size of the ephemeral disk in GB + // +kubebuilder:validation:Optional + SizeGb *float64 `json:"sizeGb" tf:"size_gb,omitempty"` } - type FunctionInitParameters struct { + // Config for asynchronous invocations of Yandex Cloud Function + AsyncInvocation []AsyncInvocationInitParameters `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` -// Config for asynchronous invocations of Yandex Cloud Function -AsyncInvocation []AsyncInvocationInitParameters `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` + // The maximum number of requests processed by a function instance at the same time + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` -// The maximum number of requests processed by a function instance at the same time -Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + // Function 
version connectivity. If specified the version will be attached to specified network + Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Function version connectivity. If specified the version will be attached to specified network -Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified + Content []ContentInitParameters `json:"content,omitempty" tf:"content,omitempty"` -// Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified -Content []ContentInitParameters `json:"content,omitempty" tf:"content,omitempty"` + // Description of the Yandex Cloud Function + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Description of the Yandex Cloud Function -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Entrypoint for Yandex Cloud Function + Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` -// Entrypoint for Yandex Cloud Function -Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` + // A set of key/value environment variables for Yandex Cloud Function + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` -// A set of key/value environment variables for Yandex Cloud Function -// +mapType=granular -Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + // Execution timeout in seconds for Yandex Cloud Function + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Execution timeout in seconds for Yandex Cloud Function -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Folder ID for the Yandex Cloud Function + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder ID for the Yandex Cloud Function -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. 
-// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // A set of key/value label pairs to assign to the Yandex Cloud Function + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the Yandex Cloud Function -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Options for logging from Yandex Cloud Function + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Options for logging from Yandex Cloud Function -LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Function + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` -// Memory in megabytes (aligned to 128MB) for Yandex Cloud Function -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // Mounts for Yandex Cloud Function. + Mounts []MountsInitParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` -// Mounts for Yandex Cloud Function. -Mounts []MountsInitParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` + // Yandex Cloud Function name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Yandex Cloud Function name used to define trigger -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified + Package []PackageInitParameters `json:"package,omitempty" tf:"package,omitempty"` -// Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified -Package []PackageInitParameters `json:"package,omitempty" tf:"package,omitempty"` + // Runtime for Yandex Cloud Function + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` -// Runtime for Yandex Cloud Function -Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + // Secrets for Yandex Cloud Function. + Secrets []SecretsInitParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` -// Secrets for Yandex Cloud Function. -Secrets []SecretsInitParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Service account ID for Yandex Cloud Function -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Function + StorageMounts []StorageMountsInitParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` -// (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Function -StorageMounts []StorageMountsInitParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + // Tags for Yandex Cloud Function. Tag "$latest" isn't returned + // +listType=set + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` -// Tags for Yandex Cloud Function. 
Tag "$latest" isn't returned -// +listType=set -Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + // Tmpfs size for Yandex Cloud Function + TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` -// Tmpfs size for Yandex Cloud Function -TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` - -// User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. -UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` + // User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. + UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` } - type FunctionObservation struct { + // Config for asynchronous invocations of Yandex Cloud Function + AsyncInvocation []AsyncInvocationObservation `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` -// Config for asynchronous invocations of Yandex Cloud Function -AsyncInvocation []AsyncInvocationObservation `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` + // The maximum number of requests processed by a function instance at the same time + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` -// The maximum number of requests processed by a function instance at the same time -Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + // Function version connectivity. If specified the version will be attached to specified network + Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Function version connectivity. If specified the version will be attached to specified network -Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified + Content []ContentObservation `json:"content,omitempty" tf:"content,omitempty"` -// Version deployment content for Yandex Cloud Function code. Can be only one package or content section. 
Either package or content section must be specified -Content []ContentObservation `json:"content,omitempty" tf:"content,omitempty"` + // Creation timestamp of the Yandex Cloud Function + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` -// Creation timestamp of the Yandex Cloud Function -CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + // Description of the Yandex Cloud Function + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Description of the Yandex Cloud Function -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Entrypoint for Yandex Cloud Function + Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` -// Entrypoint for Yandex Cloud Function -Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` + // A set of key/value environment variables for Yandex Cloud Function + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` -// A set of key/value environment variables for Yandex Cloud Function -// +mapType=granular -Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + // Execution timeout in seconds for Yandex Cloud Function + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Execution timeout in seconds for Yandex Cloud Function -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Folder ID for the Yandex Cloud Function + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Folder ID for the Yandex Cloud Function -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Secret's id + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Secret's id -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Image size for Yandex Cloud Function + ImageSize *float64 `json:"imageSize,omitempty" tf:"image_size,omitempty"` -// Image size for Yandex Cloud Function -ImageSize *float64 `json:"imageSize,omitempty" tf:"image_size,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud Function + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// A set of key/value label pairs to assign to the Yandex Cloud Function -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // Options for logging from Yandex Cloud Function + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Options for logging from Yandex Cloud Function -LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Function + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` -// Memory in megabytes (aligned to 128MB) for Yandex Cloud Function -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // Mounts for Yandex Cloud Function. + Mounts []MountsObservation `json:"mounts,omitempty" tf:"mounts,omitempty"` -// Mounts for Yandex Cloud Function. 
-Mounts []MountsObservation `json:"mounts,omitempty" tf:"mounts,omitempty"` + // Yandex Cloud Function name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Yandex Cloud Function name used to define trigger -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified + Package []PackageObservation `json:"package,omitempty" tf:"package,omitempty"` -// Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified -Package []PackageObservation `json:"package,omitempty" tf:"package,omitempty"` + // Runtime for Yandex Cloud Function + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` -// Runtime for Yandex Cloud Function -Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + // Secrets for Yandex Cloud Function. + Secrets []SecretsObservation `json:"secrets,omitempty" tf:"secrets,omitempty"` -// Secrets for Yandex Cloud Function. -Secrets []SecretsObservation `json:"secrets,omitempty" tf:"secrets,omitempty"` + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// Service account ID for Yandex Cloud Function -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Function + StorageMounts []StorageMountsObservation `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` -// (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Function -StorageMounts []StorageMountsObservation `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + // Tags for Yandex Cloud Function. Tag "$latest" isn't returned + // +listType=set + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` -// Tags for Yandex Cloud Function. Tag "$latest" isn't returned -// +listType=set -Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + // Tmpfs size for Yandex Cloud Function + TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` -// Tmpfs size for Yandex Cloud Function -TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` + // User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. + UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` -// User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. 
-UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` - -// Version for Yandex Cloud Function -Version *string `json:"version,omitempty" tf:"version,omitempty"` + // Version for Yandex Cloud Function + Version *string `json:"version,omitempty" tf:"version,omitempty"` } - type FunctionParameters struct { + // Config for asynchronous invocations of Yandex Cloud Function + // +kubebuilder:validation:Optional + AsyncInvocation []AsyncInvocationParameters `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` -// Config for asynchronous invocations of Yandex Cloud Function -// +kubebuilder:validation:Optional -AsyncInvocation []AsyncInvocationParameters `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` - -// The maximum number of requests processed by a function instance at the same time -// +kubebuilder:validation:Optional -Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + // The maximum number of requests processed by a function instance at the same time + // +kubebuilder:validation:Optional + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` -// Function version connectivity. If specified the version will be attached to specified network -// +kubebuilder:validation:Optional -Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + // Function version connectivity. If specified the version will be attached to specified network + // +kubebuilder:validation:Optional + Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` -// Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified -// +kubebuilder:validation:Optional -Content []ContentParameters `json:"content,omitempty" tf:"content,omitempty"` + // Version deployment content for Yandex Cloud Function code. Can be only one package or content section. 
Either package or content section must be specified + // +kubebuilder:validation:Optional + Content []ContentParameters `json:"content,omitempty" tf:"content,omitempty"` -// Description of the Yandex Cloud Function -// +kubebuilder:validation:Optional -Description *string `json:"description,omitempty" tf:"description,omitempty"` + // Description of the Yandex Cloud Function + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` -// Entrypoint for Yandex Cloud Function -// +kubebuilder:validation:Optional -Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` + // Entrypoint for Yandex Cloud Function + // +kubebuilder:validation:Optional + Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` -// A set of key/value environment variables for Yandex Cloud Function -// +kubebuilder:validation:Optional -// +mapType=granular -Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + // A set of key/value environment variables for Yandex Cloud Function + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` -// Execution timeout in seconds for Yandex Cloud Function -// +kubebuilder:validation:Optional -ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + // Execution timeout in seconds for Yandex Cloud Function + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` -// Folder ID for the Yandex Cloud Function -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Folder ID for the Yandex Cloud Function + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Reference to a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` -// Selector for a Folder in resourcemanager to populate folderId. -// +kubebuilder:validation:Optional -FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` -// A set of key/value label pairs to assign to the Yandex Cloud Function -// +kubebuilder:validation:Optional -// +mapType=granular -Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + // A set of key/value label pairs to assign to the Yandex Cloud Function + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` -// Options for logging from Yandex Cloud Function -// +kubebuilder:validation:Optional -LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + // Options for logging from Yandex Cloud Function + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` -// Memory in megabytes (aligned to 128MB) for Yandex Cloud Function -// +kubebuilder:validation:Optional -Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Function + // +kubebuilder:validation:Optional + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` -// Mounts for Yandex Cloud Function. -// +kubebuilder:validation:Optional -Mounts []MountsParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` + // Mounts for Yandex Cloud Function. + // +kubebuilder:validation:Optional + Mounts []MountsParameters `json:"mounts,omitempty" tf:"mounts,omitempty"` -// Yandex Cloud Function name used to define trigger -// +kubebuilder:validation:Optional -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud Function name used to define trigger + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified -// +kubebuilder:validation:Optional -Package []PackageParameters `json:"package,omitempty" tf:"package,omitempty"` + // Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified + // +kubebuilder:validation:Optional + Package []PackageParameters `json:"package,omitempty" tf:"package,omitempty"` -// Runtime for Yandex Cloud Function -// +kubebuilder:validation:Optional -Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + // Runtime for Yandex Cloud Function + // +kubebuilder:validation:Optional + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` -// Secrets for Yandex Cloud Function. -// +kubebuilder:validation:Optional -Secrets []SecretsParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + // Secrets for Yandex Cloud Function. 
+ // +kubebuilder:validation:Optional + Secrets []SecretsParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` -// Service account ID for Yandex Cloud Function -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account ID for Yandex Cloud Function + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` -// (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Function -// +kubebuilder:validation:Optional -StorageMounts []StorageMountsParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + // (DEPRECATED, use mounts.0.object_storage instead) Storage mounts for Yandex Cloud Function + // +kubebuilder:validation:Optional + StorageMounts []StorageMountsParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` -// Tags for Yandex Cloud Function. Tag "$latest" isn't returned -// +kubebuilder:validation:Optional -// +listType=set -Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + // Tags for Yandex Cloud Function. Tag "$latest" isn't returned + // +kubebuilder:validation:Optional + // +listType=set + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` -// Tmpfs size for Yandex Cloud Function -// +kubebuilder:validation:Optional -TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` + // Tmpfs size for Yandex Cloud Function + // +kubebuilder:validation:Optional + TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` -// User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. -// +kubebuilder:validation:Optional -UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` + // User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. 
+ // +kubebuilder:validation:Optional + UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` } - type LogOptionsInitParameters struct { + // Is logging from function disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from function disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Log entries are written to default log group for specified folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsObservation struct { + // Is logging from function disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from function disabled -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` - -// Log entries are written to default log group for specified folder -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to specified log group -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Minimum log entry level -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type LogOptionsParameters struct { + // Is logging from function disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` -// Is logging from function disabled -// +kubebuilder:validation:Optional -Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + // Log entries are written to default log group for specified folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` -// Log entries are written to default log group for specified folder -// +kubebuilder:validation:Optional -FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` -// Log entries are written to specified log group -// +kubebuilder:validation:Optional -LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` - -// Minimum log entry level -// +kubebuilder:validation:Optional -MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` } - type MountsInitParameters struct { + // One of the available mount types. 
Disk available during the function execution time + EphemeralDisk []EphemeralDiskInitParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` -// One of the available mount types. Disk available during the function execution time -EphemeralDisk []EphemeralDiskInitParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` + // Mount’s accessibility mode. Valid values are ro and rw + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Mount’s accessibility mode. Valid values are ro and rw -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Yandex Cloud Function name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// Yandex Cloud Function name used to define trigger -Name *string `json:"name,omitempty" tf:"name,omitempty"` - -// One of the available mount types. Object storage as a mount -ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + // One of the available mount types. Object storage as a mount + ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` } - type MountsObservation struct { + // One of the available mount types. Disk available during the function execution time + EphemeralDisk []EphemeralDiskObservation `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` -// One of the available mount types. Disk available during the function execution time -EphemeralDisk []EphemeralDiskObservation `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` - -// Mount’s accessibility mode. Valid values are ro and rw -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mount’s accessibility mode. Valid values are ro and rw + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Yandex Cloud Function name used to define trigger -Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Yandex Cloud Function name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` -// One of the available mount types. Object storage as a mount -ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + // One of the available mount types. Object storage as a mount + ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` } - type MountsParameters struct { + // One of the available mount types. Disk available during the function execution time + // +kubebuilder:validation:Optional + EphemeralDisk []EphemeralDiskParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` -// One of the available mount types. Disk available during the function execution time -// +kubebuilder:validation:Optional -EphemeralDisk []EphemeralDiskParameters `json:"ephemeralDisk,omitempty" tf:"ephemeral_disk,omitempty"` - -// Mount’s accessibility mode. Valid values are ro and rw -// +kubebuilder:validation:Optional -Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + // Mount’s accessibility mode. Valid values are ro and rw + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` -// Yandex Cloud Function name used to define trigger -// +kubebuilder:validation:Optional -Name *string `json:"name" tf:"name,omitempty"` + // Yandex Cloud Function name used to define trigger + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` -// One of the available mount types. 
Object storage as a mount -// +kubebuilder:validation:Optional -ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + // One of the available mount types. Object storage as a mount + // +kubebuilder:validation:Optional + ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` } - type ObjectStorageInitParameters struct { + // Name of the mounting bucket + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` -// Reference to a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` -// Selector for a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` - -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` } - type ObjectStorageObservation struct { + // Name of the mounting bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` } - type ObjectStorageParameters struct { + // Name of the mounting bucket + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) -// +kubebuilder:validation:Optional -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - -// Reference to a Bucket in storage to populate bucket. 
-// +kubebuilder:validation:Optional -BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` -// Selector for a Bucket in storage to populate bucket. -// +kubebuilder:validation:Optional -BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` } - type PackageInitParameters struct { + // Name of the bucket that stores the code for the version + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` -// Name of the bucket that stores the code for the version -BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` - -// Name of the object in the bucket that stores the code for the version -ObjectName *string `json:"objectName,omitempty" tf:"object_name,omitempty"` + // Name of the object in the bucket that stores the code for the version + ObjectName *string `json:"objectName,omitempty" tf:"object_name,omitempty"` -// SHA256 hash of the version deployment package -Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` + // SHA256 hash of the version deployment package + Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` } - type PackageObservation struct { + // Name of the bucket that stores the code for the version + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` -// Name of the bucket that stores the code for the version -BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + // Name of the object in the bucket that stores the code for the version + ObjectName *string `json:"objectName,omitempty" tf:"object_name,omitempty"` -// Name of the object in the bucket that stores the code for the version -ObjectName *string `json:"objectName,omitempty" tf:"object_name,omitempty"` - -// SHA256 hash of the version deployment package -Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` + // SHA256 hash of the version deployment package + Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` } - type PackageParameters struct { + // Name of the bucket that stores the code for the version + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` -// Name of the bucket that stores the code for the version -// +kubebuilder:validation:Optional -BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + // Name of the object in the bucket that stores the code for the version + // +kubebuilder:validation:Optional + ObjectName *string `json:"objectName" tf:"object_name,omitempty"` -// Name of the object in the bucket that stores the code for the version -// +kubebuilder:validation:Optional -ObjectName *string `json:"objectName" tf:"object_name,omitempty"` - -// SHA256 hash of the version deployment package -// +kubebuilder:validation:Optional -Sha256 *string `json:"sha256,omitempty" 
tf:"sha_256,omitempty"` + // SHA256 hash of the version deployment package + // +kubebuilder:validation:Optional + Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` } - type SecretsInitParameters struct { + // Function's environment variable in which secret's value will be stored + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` -// Function's environment variable in which secret's value will be stored -EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` - -// Secret's id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Secret's id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Reference to a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + // Reference to a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` -// Selector for a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + // Selector for a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` -// Secret's entries key which value will be stored in environment variable -Key *string `json:"key,omitempty" tf:"key,omitempty"` + // Secret's entries key which value will be stored in environment variable + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// Secret's version id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + // Secret's version id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` -// Reference to a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` + // Reference to a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` -// Selector for a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` + // Selector for a SecretVersion in lockbox to populate versionId. 
+ // +kubebuilder:validation:Optional + VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` } - type SecretsObservation struct { + // Function's environment variable in which secret's value will be stored + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` -// Function's environment variable in which secret's value will be stored -EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` - -// Secret's id -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Secret's id + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Secret's entries key which value will be stored in environment variable -Key *string `json:"key,omitempty" tf:"key,omitempty"` + // Secret's entries key which value will be stored in environment variable + Key *string `json:"key,omitempty" tf:"key,omitempty"` -// Secret's version id -VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + // Secret's version id + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` } - type SecretsParameters struct { + // Function's environment variable in which secret's value will be stored + // +kubebuilder:validation:Optional + EnvironmentVariable *string `json:"environmentVariable" tf:"environment_variable,omitempty"` -// Function's environment variable in which secret's value will be stored -// +kubebuilder:validation:Optional -EnvironmentVariable *string `json:"environmentVariable" tf:"environment_variable,omitempty"` - -// Secret's id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -ID *string `json:"id,omitempty" tf:"id,omitempty"` + // Secret's id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` -// Reference to a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + // Reference to a Secret in lockbox to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` -// Selector for a Secret in lockbox to populate id. -// +kubebuilder:validation:Optional -IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + // Selector for a Secret in lockbox to populate id. 
+ // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` -// Secret's entries key which value will be stored in environment variable -// +kubebuilder:validation:Optional -Key *string `json:"key" tf:"key,omitempty"` + // Secret's entries key which value will be stored in environment variable + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` -// Secret's version id -// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion -// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() -// +kubebuilder:validation:Optional -VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + // Secret's version id + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1.SecretVersion + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` -// Reference to a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` + // Reference to a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDRef *v1.Reference `json:"versionIdRef,omitempty" tf:"-"` -// Selector for a SecretVersion in lockbox to populate versionId. -// +kubebuilder:validation:Optional -VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` + // Selector for a SecretVersion in lockbox to populate versionId. + // +kubebuilder:validation:Optional + VersionIDSelector *v1.Selector `json:"versionIdSelector,omitempty" tf:"-"` } - type StorageMountsInitParameters struct { + // Name of the mounting bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + // Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path + MountPointName *string `json:"mountPointName,omitempty" tf:"mount_point_name,omitempty"` -// Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path -MountPointName *string `json:"mountPointName,omitempty" tf:"mount_point_name,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` - -// Mount the bucket in read-only mode -ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + // Mount the bucket in read-only mode + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` } - type StorageMountsObservation struct { + // Name of the mounting bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` -// Name of the mounting bucket -Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` - -// Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path -MountPointName *string `json:"mountPointName,omitempty" tf:"mount_point_name,omitempty"` + // Name of the mount point. 
The directory where the bucket is mounted will be accessible at the /function/storage/ path + MountPointName *string `json:"mountPointName,omitempty" tf:"mount_point_name,omitempty"` -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Mount the bucket in read-only mode -ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + // Mount the bucket in read-only mode + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` } - type StorageMountsParameters struct { + // Name of the mounting bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` -// Name of the mounting bucket -// +kubebuilder:validation:Optional -Bucket *string `json:"bucket" tf:"bucket,omitempty"` - -// Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path -// +kubebuilder:validation:Optional -MountPointName *string `json:"mountPointName" tf:"mount_point_name,omitempty"` + // Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path + // +kubebuilder:validation:Optional + MountPointName *string `json:"mountPointName" tf:"mount_point_name,omitempty"` -// Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted -// +kubebuilder:validation:Optional -Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` -// Mount the bucket in read-only mode -// +kubebuilder:validation:Optional -ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` + // Mount the bucket in read-only mode + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` } - type YmqFailureTargetInitParameters struct { + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` -// YMQ ARN -Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` - -// Service account ID for Yandex Cloud Function -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type YmqFailureTargetObservation struct { + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` -// YMQ ARN -Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` - -// Service account ID for Yandex Cloud Function -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type YmqFailureTargetParameters struct { + // YMQ ARN + // +kubebuilder:validation:Optional + Arn *string `json:"arn" tf:"arn,omitempty"` -// YMQ ARN -// +kubebuilder:validation:Optional -Arn *string `json:"arn" tf:"arn,omitempty"` - -// Service account ID for Yandex Cloud Function -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` + // 
Service account ID for Yandex Cloud Function + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` } - type YmqSuccessTargetInitParameters struct { + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` -// YMQ ARN -Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` - -// Service account used for writing result to queue -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account used for writing result to queue + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type YmqSuccessTargetObservation struct { + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` -// YMQ ARN -Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` - -// Service account used for writing result to queue -ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + // Service account used for writing result to queue + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` } - type YmqSuccessTargetParameters struct { + // YMQ ARN + // +kubebuilder:validation:Optional + Arn *string `json:"arn" tf:"arn,omitempty"` -// YMQ ARN -// +kubebuilder:validation:Optional -Arn *string `json:"arn" tf:"arn,omitempty"` - -// Service account used for writing result to queue -// +kubebuilder:validation:Optional -ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` + // Service account used for writing result to queue + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` } // FunctionSpec defines the desired state of Function type FunctionSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider FunctionParameters `json:"forProvider"` + ForProvider FunctionParameters `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -872,20 +789,19 @@ type FunctionSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider FunctionInitParameters `json:"initProvider,omitempty"` + InitProvider FunctionInitParameters `json:"initProvider,omitempty"` } // FunctionStatus defines the observed state of Function. type FunctionStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider FunctionObservation `json:"atProvider,omitempty"` + AtProvider FunctionObservation `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion - // Function is the Schema for the Functions API. Allows management of a Yandex Cloud Function. 
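The XValidation rules a few lines below make entrypoint, memory, name, runtime and userHash required whenever the management policies permit create or update. A minimal sketch of a Function that satisfies them, assuming the module path github.com/tagesjump/provider-upjet-yc seen elsewhere in this diff; the ptr helper and every value are illustrative, not taken from the provider:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1"
)

// ptr is a small illustrative helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	fn := v1alpha1.Function{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: v1alpha1.FunctionSpec{
			ForProvider: v1alpha1.FunctionParameters{
				// The five parameters the validation rules treat as required.
				Entrypoint: ptr("index.handler"),
				Memory:     ptr(float64(128)), // the generated type is *float64
				Name:       ptr("example"),
				Runtime:    ptr("python312"), // runtime identifier is illustrative
				UserHash:   ptr("v1"),        // arbitrary marker; change it to roll a new version
			},
		},
	}
	_ = fn
}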
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" @@ -895,13 +811,13 @@ type FunctionStatus struct { type Function struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.entrypoint) || (has(self.initProvider) && has(self.initProvider.entrypoint))",message="spec.forProvider.entrypoint is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.runtime) || (has(self.initProvider) && has(self.initProvider.runtime))",message="spec.forProvider.runtime is a required parameter" -// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userHash) || (has(self.initProvider) && has(self.initProvider.userHash))",message="spec.forProvider.userHash is a required parameter" - Spec FunctionSpec `json:"spec"` - Status FunctionStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.entrypoint) || (has(self.initProvider) && has(self.initProvider.entrypoint))",message="spec.forProvider.entrypoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.runtime) || (has(self.initProvider) && has(self.initProvider.runtime))",message="spec.forProvider.runtime is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userHash) || (has(self.initProvider) && 
has(self.initProvider.userHash))",message="spec.forProvider.userHash is a required parameter" + Spec FunctionSpec `json:"spec"` + Status FunctionStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go b/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go index 89326c4..69d97c3 100755 --- a/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go @@ -1,10 +1,6 @@ - - // Code generated by upjet. DO NOT EDIT. package v1alpha1 - - // Hub marks this type as a conversion hub. - func (tr *Function) Hub() {} - +// Hub marks this type as a conversion hub. +func (tr *Function) Hub() {} diff --git a/apis/yandex/v1alpha1/zz_generated.deepcopy.go b/apis/yandex/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..8aae5a6 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1770 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInvocationInitParameters) DeepCopyInto(out *AsyncInvocationInitParameters) { + *out = *in + if in.RetriesCount != nil { + in, out := &in.RetriesCount, &out.RetriesCount + *out = new(float64) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.YmqFailureTarget != nil { + in, out := &in.YmqFailureTarget, &out.YmqFailureTarget + *out = make([]YmqFailureTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YmqSuccessTarget != nil { + in, out := &in.YmqSuccessTarget, &out.YmqSuccessTarget + *out = make([]YmqSuccessTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInvocationInitParameters. +func (in *AsyncInvocationInitParameters) DeepCopy() *AsyncInvocationInitParameters { + if in == nil { + return nil + } + out := new(AsyncInvocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInvocationObservation) DeepCopyInto(out *AsyncInvocationObservation) { + *out = *in + if in.RetriesCount != nil { + in, out := &in.RetriesCount, &out.RetriesCount + *out = new(float64) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.YmqFailureTarget != nil { + in, out := &in.YmqFailureTarget, &out.YmqFailureTarget + *out = make([]YmqFailureTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YmqSuccessTarget != nil { + in, out := &in.YmqSuccessTarget, &out.YmqSuccessTarget + *out = make([]YmqSuccessTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInvocationObservation. 
+func (in *AsyncInvocationObservation) DeepCopy() *AsyncInvocationObservation { + if in == nil { + return nil + } + out := new(AsyncInvocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInvocationParameters) DeepCopyInto(out *AsyncInvocationParameters) { + *out = *in + if in.RetriesCount != nil { + in, out := &in.RetriesCount, &out.RetriesCount + *out = new(float64) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.YmqFailureTarget != nil { + in, out := &in.YmqFailureTarget, &out.YmqFailureTarget + *out = make([]YmqFailureTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YmqSuccessTarget != nil { + in, out := &in.YmqSuccessTarget, &out.YmqSuccessTarget + *out = make([]YmqSuccessTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInvocationParameters. +func (in *AsyncInvocationParameters) DeepCopy() *AsyncInvocationParameters { + if in == nil { + return nil + } + out := new(AsyncInvocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInitParameters) DeepCopyInto(out *ConnectivityInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInitParameters. +func (in *ConnectivityInitParameters) DeepCopy() *ConnectivityInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityObservation) DeepCopyInto(out *ConnectivityObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityObservation. +func (in *ConnectivityObservation) DeepCopy() *ConnectivityObservation { + if in == nil { + return nil + } + out := new(ConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityParameters) DeepCopyInto(out *ConnectivityParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityParameters. +func (in *ConnectivityParameters) DeepCopy() *ConnectivityParameters { + if in == nil { + return nil + } + out := new(ConnectivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
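Every helper in this generated file follows the same idiom: shallow-copy the struct with *out = *in, then re-allocate each pointer, slice and map so the copy shares no memory with the source. A tiny standalone sketch (ours, not generated code) of why the re-allocation matters:

package main

import "fmt"

// widget stands in for any of the generated parameter structs.
type widget struct{ Name *string }

// deepCopyInto mirrors the generated idiom: copy the struct, then give the
// copy its own allocation for every pointer field.
func (in *widget) deepCopyInto(out *widget) {
	*out = *in // after this line out.Name still aliases in.Name
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string) // fresh allocation breaks the alias
		**out = **in
	}
}

func main() {
	n := "a"
	w := widget{Name: &n}
	var c widget
	w.deepCopyInto(&c)
	*w.Name = "b"
	fmt.Println(*c.Name) // prints "a": the copy is independent
}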
+func (in *ContentInitParameters) DeepCopyInto(out *ContentInitParameters) { + *out = *in + if in.ZipFilename != nil { + in, out := &in.ZipFilename, &out.ZipFilename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentInitParameters. +func (in *ContentInitParameters) DeepCopy() *ContentInitParameters { + if in == nil { + return nil + } + out := new(ContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentObservation) DeepCopyInto(out *ContentObservation) { + *out = *in + if in.ZipFilename != nil { + in, out := &in.ZipFilename, &out.ZipFilename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentObservation. +func (in *ContentObservation) DeepCopy() *ContentObservation { + if in == nil { + return nil + } + out := new(ContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentParameters) DeepCopyInto(out *ContentParameters) { + *out = *in + if in.ZipFilename != nil { + in, out := &in.ZipFilename, &out.ZipFilename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentParameters. +func (in *ContentParameters) DeepCopy() *ContentParameters { + if in == nil { + return nil + } + out := new(ContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralDiskInitParameters) DeepCopyInto(out *EphemeralDiskInitParameters) { + *out = *in + if in.BlockSizeKb != nil { + in, out := &in.BlockSizeKb, &out.BlockSizeKb + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralDiskInitParameters. +func (in *EphemeralDiskInitParameters) DeepCopy() *EphemeralDiskInitParameters { + if in == nil { + return nil + } + out := new(EphemeralDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralDiskObservation) DeepCopyInto(out *EphemeralDiskObservation) { + *out = *in + if in.BlockSizeKb != nil { + in, out := &in.BlockSizeKb, &out.BlockSizeKb + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralDiskObservation. +func (in *EphemeralDiskObservation) DeepCopy() *EphemeralDiskObservation { + if in == nil { + return nil + } + out := new(EphemeralDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
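The EphemeralDisk helpers above copy the two *float64 sizing fields of a scratch disk that exists only for the duration of an execution. A sketch of wiring one into MountsParameters, with made-up values:

package main

import "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1"

// ptr is a small illustrative helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	// A read-write scratch disk that lives only for the function execution.
	mount := v1alpha1.MountsParameters{
		Name: ptr("scratch"), // name of the mount point
		Mode: ptr("rw"),      // valid values are ro and rw
		EphemeralDisk: []v1alpha1.EphemeralDiskParameters{{
			SizeGb:      ptr(float64(5)),
			BlockSizeKb: ptr(float64(4)),
		}},
	}
	_ = mount
}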
+func (in *EphemeralDiskParameters) DeepCopyInto(out *EphemeralDiskParameters) { + *out = *in + if in.BlockSizeKb != nil { + in, out := &in.BlockSizeKb, &out.BlockSizeKb + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralDiskParameters. +func (in *EphemeralDiskParameters) DeepCopy() *EphemeralDiskParameters { + if in == nil { + return nil + } + out := new(EphemeralDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.AsyncInvocation != nil { + in, out := &in.AsyncInvocation, &out.AsyncInvocation + *out = make([]AsyncInvocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]ContentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val 
:= range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]MountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Package != nil { + in, out := &in.Package, &out.Package + *out = make([]PackageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TmpfsSize != nil { + in, out := &in.TmpfsSize, &out.TmpfsSize + *out = new(float64) + **out = **in + } + if in.UserHash != nil { + in, out := &in.UserHash, &out.UserHash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. +func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. +func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
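DeepCopyObject is what makes Function and FunctionList satisfy apimachinery's runtime.Object, which is why informer caches can hand these objects around: consumers are expected to copy before mutating. A sketch of that discipline, under the same module-path assumption as above:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1"
)

// ptr is a small illustrative helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

// mutate works on a deep copy, so whatever cache handed us obj stays pristine.
func mutate(obj runtime.Object) runtime.Object {
	c := obj.DeepCopyObject()
	if fn, ok := c.(*v1alpha1.Function); ok {
		fn.Spec.ForProvider.Description = ptr("edited copy")
	}
	return c
}

func main() {
	orig := &v1alpha1.Function{}
	_ = mutate(orig)
	fmt.Println(orig.Spec.ForProvider.Description == nil) // true: original untouched
}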
+func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.AsyncInvocation != nil { + in, out := &in.AsyncInvocation, &out.AsyncInvocation + *out = make([]AsyncInvocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]ContentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageSize != nil { + in, out := &in.ImageSize, &out.ImageSize + *out = new(float64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]MountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Package != nil { + in, out := &in.Package, &out.Package + *out = make([]PackageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TmpfsSize != nil { + in, out := &in.TmpfsSize, &out.TmpfsSize + *out = new(float64) + **out = **in + } + if in.UserHash != nil { + in, out := &in.UserHash, &out.UserHash + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionObservation. +func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.AsyncInvocation != nil { + in, out := &in.AsyncInvocation, &out.AsyncInvocation + *out = make([]AsyncInvocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]ContentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = 
make([]MountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Package != nil { + in, out := &in.Package, &out.Package + *out = make([]PackageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TmpfsSize != nil { + in, out := &in.TmpfsSize, &out.TmpfsSize + *out = new(float64) + **out = **in + } + if in.UserHash != nil { + in, out := &in.UserHash, &out.UserHash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
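The LogOptions helpers above cover the knobs for routing a function's logs. A sketch with placeholder values (the accepted min_level names are not spelled out in this diff):

package main

import "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1"

// ptr is a small illustrative helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	// Route logs to an explicit log group and drop entries below WARN.
	logOpts := v1alpha1.LogOptionsParameters{
		Disabled:   ptr(false),
		LogGroupID: ptr("e23-placeholder-id"), // hypothetical log group ID
		MinLevel:   ptr("WARN"),               // level name is illustrative
	}
	_ = logOpts
}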
+func (in *MountsInitParameters) DeepCopyInto(out *MountsInitParameters) { + *out = *in + if in.EphemeralDisk != nil { + in, out := &in.EphemeralDisk, &out.EphemeralDisk + *out = make([]EphemeralDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountsInitParameters. +func (in *MountsInitParameters) DeepCopy() *MountsInitParameters { + if in == nil { + return nil + } + out := new(MountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MountsObservation) DeepCopyInto(out *MountsObservation) { + *out = *in + if in.EphemeralDisk != nil { + in, out := &in.EphemeralDisk, &out.EphemeralDisk + *out = make([]EphemeralDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountsObservation. +func (in *MountsObservation) DeepCopy() *MountsObservation { + if in == nil { + return nil + } + out := new(MountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MountsParameters) DeepCopyInto(out *MountsParameters) { + *out = *in + if in.EphemeralDisk != nil { + in, out := &in.EphemeralDisk, &out.EphemeralDisk + *out = make([]EphemeralDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountsParameters. +func (in *MountsParameters) DeepCopy() *MountsParameters { + if in == nil { + return nil + } + out := new(MountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageInitParameters) DeepCopyInto(out *ObjectStorageInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageInitParameters. +func (in *ObjectStorageInitParameters) DeepCopy() *ObjectStorageInitParameters { + if in == nil { + return nil + } + out := new(ObjectStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageObservation) DeepCopyInto(out *ObjectStorageObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageObservation. +func (in *ObjectStorageObservation) DeepCopy() *ObjectStorageObservation { + if in == nil { + return nil + } + out := new(ObjectStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageParameters) DeepCopyInto(out *ObjectStorageParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageParameters. +func (in *ObjectStorageParameters) DeepCopy() *ObjectStorageParameters { + if in == nil { + return nil + } + out := new(ObjectStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageInitParameters) DeepCopyInto(out *PackageInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ObjectName != nil { + in, out := &in.ObjectName, &out.ObjectName + *out = new(string) + **out = **in + } + if in.Sha256 != nil { + in, out := &in.Sha256, &out.Sha256 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInitParameters. +func (in *PackageInitParameters) DeepCopy() *PackageInitParameters { + if in == nil { + return nil + } + out := new(PackageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
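ObjectStorage mounts carry reference machinery alongside the plain fields: per the +crossplane:generate markers earlier in this diff, bucket can be resolved from a storage v1alpha1.Bucket through BucketRef or BucketSelector instead of being hard-coded. A sketch using a selector with an illustrative label:

package main

import (
	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	"github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1"
)

// ptr is a small illustrative helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	// Mount only the assets/ prefix of whichever Bucket matches the label;
	// the reference resolver then fills in bucket from that managed resource.
	objStorage := v1alpha1.ObjectStorageParameters{
		Prefix: ptr("assets/"),
		BucketSelector: &v1.Selector{
			MatchLabels: map[string]string{"app": "example"}, // illustrative label
		},
	}
	_ = objStorage
}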
+func (in *PackageObservation) DeepCopyInto(out *PackageObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ObjectName != nil { + in, out := &in.ObjectName, &out.ObjectName + *out = new(string) + **out = **in + } + if in.Sha256 != nil { + in, out := &in.Sha256, &out.Sha256 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageObservation. +func (in *PackageObservation) DeepCopy() *PackageObservation { + if in == nil { + return nil + } + out := new(PackageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageParameters) DeepCopyInto(out *PackageParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ObjectName != nil { + in, out := &in.ObjectName, &out.ObjectName + *out = new(string) + **out = **in + } + if in.Sha256 != nil { + in, out := &in.Sha256, &out.Sha256 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageParameters. +func (in *PackageParameters) DeepCopy() *PackageParameters { + if in == nil { + return nil + } + out := new(PackageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsInitParameters) DeepCopyInto(out *SecretsInitParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } + if in.VersionIDRef != nil { + in, out := &in.VersionIDRef, &out.VersionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionIDSelector != nil { + in, out := &in.VersionIDSelector, &out.VersionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsInitParameters. +func (in *SecretsInitParameters) DeepCopy() *SecretsInitParameters { + if in == nil { + return nil + } + out := new(SecretsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretsObservation) DeepCopyInto(out *SecretsObservation) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsObservation. +func (in *SecretsObservation) DeepCopy() *SecretsObservation { + if in == nil { + return nil + } + out := new(SecretsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsParameters) DeepCopyInto(out *SecretsParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } + if in.VersionIDRef != nil { + in, out := &in.VersionIDRef, &out.VersionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionIDSelector != nil { + in, out := &in.VersionIDSelector, &out.VersionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsParameters. +func (in *SecretsParameters) DeepCopy() *SecretsParameters { + if in == nil { + return nil + } + out := new(SecretsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsInitParameters) DeepCopyInto(out *StorageMountsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointName != nil { + in, out := &in.MountPointName, &out.MountPointName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsInitParameters. +func (in *StorageMountsInitParameters) DeepCopy() *StorageMountsInitParameters { + if in == nil { + return nil + } + out := new(StorageMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageMountsObservation) DeepCopyInto(out *StorageMountsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointName != nil { + in, out := &in.MountPointName, &out.MountPointName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsObservation. +func (in *StorageMountsObservation) DeepCopy() *StorageMountsObservation { + if in == nil { + return nil + } + out := new(StorageMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsParameters) DeepCopyInto(out *StorageMountsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointName != nil { + in, out := &in.MountPointName, &out.MountPointName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsParameters. +func (in *StorageMountsParameters) DeepCopy() *StorageMountsParameters { + if in == nil { + return nil + } + out := new(StorageMountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqFailureTargetInitParameters) DeepCopyInto(out *YmqFailureTargetInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqFailureTargetInitParameters. +func (in *YmqFailureTargetInitParameters) DeepCopy() *YmqFailureTargetInitParameters { + if in == nil { + return nil + } + out := new(YmqFailureTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqFailureTargetObservation) DeepCopyInto(out *YmqFailureTargetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqFailureTargetObservation. +func (in *YmqFailureTargetObservation) DeepCopy() *YmqFailureTargetObservation { + if in == nil { + return nil + } + out := new(YmqFailureTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YmqFailureTargetParameters) DeepCopyInto(out *YmqFailureTargetParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqFailureTargetParameters. +func (in *YmqFailureTargetParameters) DeepCopy() *YmqFailureTargetParameters { + if in == nil { + return nil + } + out := new(YmqFailureTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqSuccessTargetInitParameters) DeepCopyInto(out *YmqSuccessTargetInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqSuccessTargetInitParameters. +func (in *YmqSuccessTargetInitParameters) DeepCopy() *YmqSuccessTargetInitParameters { + if in == nil { + return nil + } + out := new(YmqSuccessTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqSuccessTargetObservation) DeepCopyInto(out *YmqSuccessTargetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqSuccessTargetObservation. +func (in *YmqSuccessTargetObservation) DeepCopy() *YmqSuccessTargetObservation { + if in == nil { + return nil + } + out := new(YmqSuccessTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqSuccessTargetParameters) DeepCopyInto(out *YmqSuccessTargetParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqSuccessTargetParameters. +func (in *YmqSuccessTargetParameters) DeepCopy() *YmqSuccessTargetParameters { + if in == nil { + return nil + } + out := new(YmqSuccessTargetParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/yandex/v1alpha1/zz_generated.resolvers.go b/apis/yandex/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..34ef644 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,169 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Function. +func (mg *Function) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Mounts); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Mounts[i3].ObjectStorage); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].BucketRef, + Selector: mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].BucketSelector, + To: reference.To{ + List: &v1alpha11.BucketList{}, + Managed: &v1alpha11.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].Bucket") + } + mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Mounts[i3].ObjectStorage[i4].BucketRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Secrets[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Secrets[i3].IDRef, + Selector: mg.Spec.ForProvider.Secrets[i3].IDSelector, + To: reference.To{ + List: &v1alpha12.SecretList{}, + Managed: &v1alpha12.Secret{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Secrets[i3].ID") + } + mg.Spec.ForProvider.Secrets[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Secrets[i3].IDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Secrets[i3].VersionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Secrets[i3].VersionIDRef, + Selector: mg.Spec.ForProvider.Secrets[i3].VersionIDSelector, + To: reference.To{ + List: &v1alpha12.SecretVersionList{}, + Managed: &v1alpha12.SecretVersion{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Secrets[i3].VersionID") + } + mg.Spec.ForProvider.Secrets[i3].VersionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Secrets[i3].VersionIDRef = 
rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Mounts); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Mounts[i3].ObjectStorage); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].BucketRef, + Selector: mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].BucketSelector, + To: reference.To{ + List: &v1alpha11.BucketList{}, + Managed: &v1alpha11.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].Bucket") + } + mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Mounts[i3].ObjectStorage[i4].BucketRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Secrets[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Secrets[i3].IDRef, + Selector: mg.Spec.InitProvider.Secrets[i3].IDSelector, + To: reference.To{ + List: &v1alpha12.SecretList{}, + Managed: &v1alpha12.Secret{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Secrets[i3].ID") + } + mg.Spec.InitProvider.Secrets[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Secrets[i3].IDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Secrets); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Secrets[i3].VersionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Secrets[i3].VersionIDRef, + Selector: mg.Spec.InitProvider.Secrets[i3].VersionIDSelector, + To: reference.To{ + List: &v1alpha12.SecretVersionList{}, + Managed: &v1alpha12.SecretVersion{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Secrets[i3].VersionID") + } + mg.Spec.InitProvider.Secrets[i3].VersionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Secrets[i3].VersionIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/yandex/v1alpha1/zz_groupversion_info.go b/apis/yandex/v1alpha1/zz_groupversion_info.go index 69101d6..7cbc2b6 100755 --- a/apis/yandex/v1alpha1/zz_groupversion_info.go +++ b/apis/yandex/v1alpha1/zz_groupversion_info.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
// +kubebuilder:object:generate=true
diff --git a/apis/ydb/v1alpha1/zz_databasededicated_terraformed.go b/apis/ydb/v1alpha1/zz_databasededicated_terraformed.go
new file mode 100755
index 0000000..4657a17
--- /dev/null
+++ b/apis/ydb/v1alpha1/zz_databasededicated_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this DatabaseDedicated
+func (mg *DatabaseDedicated) GetTerraformResourceType() string {
+	return "yandex_ydb_database_dedicated"
+}
+
+// GetConnectionDetailsMapping for this DatabaseDedicated
+func (tr *DatabaseDedicated) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this DatabaseDedicated
+func (tr *DatabaseDedicated) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this DatabaseDedicated
+func (tr *DatabaseDedicated) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this DatabaseDedicated
+func (tr *DatabaseDedicated) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this DatabaseDedicated
+func (tr *DatabaseDedicated) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DatabaseDedicated
+func (tr *DatabaseDedicated) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DatabaseDedicated
+func (tr *DatabaseDedicated) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DatabaseDedicated
+func (tr *DatabaseDedicated) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
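The effect of the merge that follows, shown as a minimal, self-contained Go sketch over plain maps (the values are hypothetical; this snippet is not part of the generated file):

package main

import (
	"fmt"

	"dario.cat/mergo"
)

func main() {
	// forProvider-style values: anything already set here must win.
	params := map[string]any{"name": "db-prod"}
	// initProvider-style values: applied at creation, fill gaps only.
	initParams := map[string]any{"name": "db-init", "description": "set at create time"}

	// WithSliceDeepCopy implies Overwrite=true, so the extra option function
	// flips Overwrite back off: keys already present in params are preserved,
	// missing ones are filled in from initParams.
	if err := mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	}); err != nil {
		panic(err)
	}
	fmt.Println(params) // map[description:set at create time name:db-prod]
}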
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DatabaseDedicated using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DatabaseDedicated) LateInitialize(attrs []byte) (bool, error) {
+	params := &DatabaseDedicatedParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *DatabaseDedicated) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/ydb/v1alpha1/zz_databasededicated_types.go b/apis/ydb/v1alpha1/zz_databasededicated_types.go
new file mode 100755
index 0000000..4f77f96
--- /dev/null
+++ b/apis/ydb/v1alpha1/zz_databasededicated_types.go
@@ -0,0 +1,407 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type DatabaseDedicatedInitParameters struct {
+
+	// Whether public IP addresses should be assigned to the Yandex Database cluster.
+	AssignPublicIps *bool `json:"assignPublicIps,omitempty" tf:"assign_public_ips,omitempty"`
+
+	// Inhibits deletion of the database. Can be either true or false
+	DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// A description for the Yandex Database cluster.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// ID of the folder that the Yandex Database cluster belongs to. It will be deduced from provider configuration if not set explicitly.
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// A set of key/value label pairs to assign to the Yandex Database cluster.
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+
+	// Location for the Yandex Database cluster. The structure is documented below.
+	Location []LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"`
+
+	// Location ID for the Yandex Database cluster.
+	LocationID *string `json:"locationId,omitempty" tf:"location_id,omitempty"`
+
+	// Name of the Yandex Database cluster.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// ID of the network to attach the Yandex Database cluster to.
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // The Yandex Database cluster preset. Available presets can be obtained via yc ydb resource-preset list command. + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + + // Scaling policy for the Yandex Database cluster. The structure is documented below. + ScalePolicy []ScalePolicyInitParameters `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // A list of storage configuration options for the Yandex Database cluster. The structure is documented below. + StorageConfig []StorageConfigInitParameters `json:"storageConfig,omitempty" tf:"storage_config,omitempty"` + + // List of subnet IDs to attach the Yandex Database cluster to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // References to Subnet in vpc to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in vpc to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"` +} + +type DatabaseDedicatedObservation struct { + + // Whether public IP addresses should be assigned to the Yandex Database cluster. + AssignPublicIps *bool `json:"assignPublicIps,omitempty" tf:"assign_public_ips,omitempty"` + + // The Yandex Database cluster creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Full database path of the Yandex Database cluster. Useful for SDK configuration. + DatabasePath *string `json:"databasePath,omitempty" tf:"database_path,omitempty"` + + // Inhibits deletion of the database. Can be either true or false + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A description for the Yandex Database cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the Yandex Database cluster belongs to. It will be deduced from provider configuration if not set explicitly. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // ID of the Yandex Database cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Database cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Location for the Yandex Database cluster. The structure is documented below. + Location []LocationObservation `json:"location,omitempty" tf:"location,omitempty"` + + // Location ID for the Yandex Database cluster. + LocationID *string `json:"locationId,omitempty" tf:"location_id,omitempty"` + + // Name of the Yandex Database cluster. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network to attach the Yandex Database cluster to. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // The Yandex Database cluster preset. Available presets can be obtained via yc ydb resource-preset list command. + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + + // Scaling policy for the Yandex Database cluster. The structure is documented below. + ScalePolicy []ScalePolicyObservation `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // Status of the Yandex Database cluster. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A list of storage configuration options for the Yandex Database cluster. The structure is documented below. + StorageConfig []StorageConfigObservation `json:"storageConfig,omitempty" tf:"storage_config,omitempty"` + + // List of subnet IDs to attach the Yandex Database cluster to. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Whether TLS is enabled for the Yandex Database cluster. Useful for SDK configuration. + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` + + // API endpoint of the Yandex Database cluster. Useful for SDK configuration. + YdbAPIEndpoint *string `json:"ydbApiEndpoint,omitempty" tf:"ydb_api_endpoint,omitempty"` + + // Full endpoint of the Yandex Database cluster. + YdbFullEndpoint *string `json:"ydbFullEndpoint,omitempty" tf:"ydb_full_endpoint,omitempty"` +} + +type DatabaseDedicatedParameters struct { + + // Whether public IP addresses should be assigned to the Yandex Database cluster. + // +kubebuilder:validation:Optional + AssignPublicIps *bool `json:"assignPublicIps,omitempty" tf:"assign_public_ips,omitempty"` + + // Inhibits deletion of the database. Can be either true or false + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A description for the Yandex Database cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the Yandex Database cluster belongs to. It will be deduced from provider configuration if not set explicitly. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Database cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Location for the Yandex Database cluster. The structure is documented below. + // +kubebuilder:validation:Optional + Location []LocationParameters `json:"location,omitempty" tf:"location,omitempty"` + + // Location ID for the Yandex Database cluster. 
+ // +kubebuilder:validation:Optional + LocationID *string `json:"locationId,omitempty" tf:"location_id,omitempty"` + + // Name of the Yandex Database cluster. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the network to attach the Yandex Database cluster to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Network + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Reference to a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDRef *v1.Reference `json:"networkIdRef,omitempty" tf:"-"` + + // Selector for a Network in vpc to populate networkId. + // +kubebuilder:validation:Optional + NetworkIDSelector *v1.Selector `json:"networkIdSelector,omitempty" tf:"-"` + + // The Yandex Database cluster preset. Available presets can be obtained via yc ydb resource-preset list command. + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` + + // Scaling policy for the Yandex Database cluster. The structure is documented below. + // +kubebuilder:validation:Optional + ScalePolicy []ScalePolicyParameters `json:"scalePolicy,omitempty" tf:"scale_policy,omitempty"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // A list of storage configuration options for the Yandex Database cluster. The structure is documented below. + // +kubebuilder:validation:Optional + StorageConfig []StorageConfigParameters `json:"storageConfig,omitempty" tf:"storage_config,omitempty"` + + // List of subnet IDs to attach the Yandex Database cluster to. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // References to Subnet in vpc to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in vpc to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"` +} + +type FixedScaleInitParameters struct { + + // Number of instances for the Yandex Database cluster. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type FixedScaleObservation struct { + + // Number of instances for the Yandex Database cluster. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type FixedScaleParameters struct { + + // Number of instances for the Yandex Database cluster. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` +} + +type LocationInitParameters struct { + + // Region for the Yandex Database cluster. The structure is documented below. + Region []RegionInitParameters `json:"region,omitempty" tf:"region,omitempty"` +} + +type LocationObservation struct { + + // Region for the Yandex Database cluster. The structure is documented below. + Region []RegionObservation `json:"region,omitempty" tf:"region,omitempty"` +} + +type LocationParameters struct { + + // Region for the Yandex Database cluster. The structure is documented below. 
+	// +kubebuilder:validation:Optional
+	Region []RegionParameters `json:"region,omitempty" tf:"region,omitempty"`
+}
+
+type RegionInitParameters struct {
+
+	// Region ID for the Yandex Database cluster.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+}
+
+type RegionObservation struct {
+
+	// Region ID for the Yandex Database cluster.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+}
+
+type RegionParameters struct {
+
+	// Region ID for the Yandex Database cluster.
+	// +kubebuilder:validation:Optional
+	ID *string `json:"id" tf:"id,omitempty"`
+}
+
+type ScalePolicyInitParameters struct {
+
+	// Fixed scaling policy for the Yandex Database cluster. The structure is documented below.
+	FixedScale []FixedScaleInitParameters `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"`
+}
+
+type ScalePolicyObservation struct {
+
+	// Fixed scaling policy for the Yandex Database cluster. The structure is documented below.
+	FixedScale []FixedScaleObservation `json:"fixedScale,omitempty" tf:"fixed_scale,omitempty"`
+}
+
+type ScalePolicyParameters struct {
+
+	// Fixed scaling policy for the Yandex Database cluster. The structure is documented below.
+	// +kubebuilder:validation:Optional
+	FixedScale []FixedScaleParameters `json:"fixedScale" tf:"fixed_scale,omitempty"`
+}
+
+type StorageConfigInitParameters struct {
+
+	// Amount of storage groups of selected type for the Yandex Database cluster.
+	GroupCount *float64 `json:"groupCount,omitempty" tf:"group_count,omitempty"`
+
+	// Storage type ID for the Yandex Database cluster. Available presets can be obtained via yc ydb storage-type list command.
+	StorageTypeID *string `json:"storageTypeId,omitempty" tf:"storage_type_id,omitempty"`
+}
+
+type StorageConfigObservation struct {
+
+	// Amount of storage groups of selected type for the Yandex Database cluster.
+	GroupCount *float64 `json:"groupCount,omitempty" tf:"group_count,omitempty"`
+
+	// Storage type ID for the Yandex Database cluster. Available presets can be obtained via yc ydb storage-type list command.
+	StorageTypeID *string `json:"storageTypeId,omitempty" tf:"storage_type_id,omitempty"`
+}
+
+type StorageConfigParameters struct {
+
+	// Amount of storage groups of selected type for the Yandex Database cluster.
+	// +kubebuilder:validation:Optional
+	GroupCount *float64 `json:"groupCount" tf:"group_count,omitempty"`
+
+	// Storage type ID for the Yandex Database cluster. Available presets can be obtained via yc ydb storage-type list command.
+	// +kubebuilder:validation:Optional
+	StorageTypeID *string `json:"storageTypeId" tf:"storage_type_id,omitempty"`
+}
+
+// DatabaseDedicatedSpec defines the desired state of DatabaseDedicated
+type DatabaseDedicatedSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider DatabaseDedicatedParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
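To make the split concrete: a hypothetical in-package snippet (not generated code) that puts a long-lived field in forProvider and a create-only field in initProvider:

func strPtr(s string) *string { return &s }

// Name participates in updates for the whole lifetime of the resource, so it
// belongs in ForProvider; Labels here are assumed to be handed over to some
// other controller after creation, so they are set once via InitProvider.
var exampleDatabase = DatabaseDedicated{
	Spec: DatabaseDedicatedSpec{
		ForProvider: DatabaseDedicatedParameters{
			Name: strPtr("my-db"),
		},
		InitProvider: DatabaseDedicatedInitParameters{
			Labels: map[string]*string{"team": strPtr("data")},
		},
	},
}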
+ InitProvider DatabaseDedicatedInitParameters `json:"initProvider,omitempty"` +} + +// DatabaseDedicatedStatus defines the observed state of DatabaseDedicated. +type DatabaseDedicatedStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DatabaseDedicatedObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DatabaseDedicated is the Schema for the DatabaseDedicateds API. Manages Yandex Database dedicated cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DatabaseDedicated struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resourcePresetId) || (has(self.initProvider) && has(self.initProvider.resourcePresetId))",message="spec.forProvider.resourcePresetId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scalePolicy) || (has(self.initProvider) && has(self.initProvider.scalePolicy))",message="spec.forProvider.scalePolicy is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageConfig) || (has(self.initProvider) && has(self.initProvider.storageConfig))",message="spec.forProvider.storageConfig is a required parameter" + Spec DatabaseDedicatedSpec `json:"spec"` + Status DatabaseDedicatedStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DatabaseDedicatedList contains a list of DatabaseDedicateds +type DatabaseDedicatedList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DatabaseDedicated `json:"items"` +} + +// Repository type metadata. +var ( + DatabaseDedicated_Kind = "DatabaseDedicated" + DatabaseDedicated_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DatabaseDedicated_Kind}.String() + DatabaseDedicated_KindAPIVersion = DatabaseDedicated_Kind + "." 
+ CRDGroupVersion.String()
+	DatabaseDedicated_GroupVersionKind = CRDGroupVersion.WithKind(DatabaseDedicated_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&DatabaseDedicated{}, &DatabaseDedicatedList{})
+}
diff --git a/apis/ydb/v1alpha1/zz_databaseiambinding_terraformed.go b/apis/ydb/v1alpha1/zz_databaseiambinding_terraformed.go
new file mode 100755
index 0000000..24f256e
--- /dev/null
+++ b/apis/ydb/v1alpha1/zz_databaseiambinding_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this DatabaseIAMBinding
+func (mg *DatabaseIAMBinding) GetTerraformResourceType() string {
+	return "yandex_ydb_database_iam_binding"
+}
+
+// GetConnectionDetailsMapping for this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DatabaseIAMBinding
+func (tr *DatabaseIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DatabaseIAMBinding using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DatabaseIAMBinding) LateInitialize(attrs []byte) (bool, error) {
+	params := &DatabaseIAMBindingParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *DatabaseIAMBinding) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/ydb/v1alpha1/zz_databaseiambinding_types.go b/apis/ydb/v1alpha1/zz_databaseiambinding_types.go
new file mode 100755
index 0000000..600e0f0
--- /dev/null
+++ b/apis/ydb/v1alpha1/zz_databaseiambinding_types.go
@@ -0,0 +1,165 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type DatabaseIAMBindingInitParameters struct {
+
+	// The Managed Service YDB instance Database ID to apply a binding to.
+	// +crossplane:generate:reference:type=DatabaseServerless
+	DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"`
+
+	// Reference to a DatabaseServerless to populate databaseId.
+	// +kubebuilder:validation:Optional
+	DatabaseIDRef *v1.Reference `json:"databaseIdRef,omitempty" tf:"-"`
+
+	// Selector for a DatabaseServerless to populate databaseId.
+	// +kubebuilder:validation:Optional
+	DatabaseIDSelector *v1.Selector `json:"databaseIdSelector,omitempty" tf:"-"`
+
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
+	// +crossplane:generate:reference:refFieldName=ServiceAccountRef
+	// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be applied. See roles.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// References to ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
+
+	// Selector for a list of ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
+
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+type DatabaseIAMBindingObservation struct {
+
+	// The Managed Service YDB instance Database ID to apply a binding to.
+	DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be applied. See roles.
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+type DatabaseIAMBindingParameters struct {
+
+	// The Managed Service YDB instance Database ID to apply a binding to.
+	// +crossplane:generate:reference:type=DatabaseServerless
+	// +kubebuilder:validation:Optional
+	DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"`
+
+	// Reference to a DatabaseServerless to populate databaseId.
+	// +kubebuilder:validation:Optional
+	DatabaseIDRef *v1.Reference `json:"databaseIdRef,omitempty" tf:"-"`
+
+	// Selector for a DatabaseServerless to populate databaseId.
+	// +kubebuilder:validation:Optional
+	DatabaseIDSelector *v1.Selector `json:"databaseIdSelector,omitempty" tf:"-"`
+
+	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount
+	// +crossplane:generate:reference:extractor=github.com/tagesjump/provider-upjet-yc/config/iam.ServiceAccountRefValue()
+	// +crossplane:generate:reference:refFieldName=ServiceAccountRef
+	// +crossplane:generate:reference:selectorFieldName=ServiceAccountSelector
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	Members []*string `json:"members,omitempty" tf:"members,omitempty"`
+
+	// The role that should be applied. See roles.
+	// +kubebuilder:validation:Optional
+	Role *string `json:"role,omitempty" tf:"role,omitempty"`
+
+	// References to ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountRef []v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"`
+
+	// Selector for a list of ServiceAccount in iam to populate members.
+	// +kubebuilder:validation:Optional
+	ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"`
+
+	// +kubebuilder:validation:Optional
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+// DatabaseIAMBindingSpec defines the desired state of DatabaseIAMBinding
+type DatabaseIAMBindingSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider DatabaseIAMBindingParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider DatabaseIAMBindingInitParameters `json:"initProvider,omitempty"`
+}
+
+// DatabaseIAMBindingStatus defines the observed state of DatabaseIAMBinding.
+type DatabaseIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DatabaseIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DatabaseIAMBinding is the Schema for the DatabaseIAMBindings API. Allows management of a single IAM binding for a +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DatabaseIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec DatabaseIAMBindingSpec `json:"spec"` + Status DatabaseIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DatabaseIAMBindingList contains a list of DatabaseIAMBindings +type DatabaseIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DatabaseIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + DatabaseIAMBinding_Kind = "DatabaseIAMBinding" + DatabaseIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DatabaseIAMBinding_Kind}.String() + DatabaseIAMBinding_KindAPIVersion = DatabaseIAMBinding_Kind + "." + CRDGroupVersion.String() + DatabaseIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(DatabaseIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&DatabaseIAMBinding{}, &DatabaseIAMBindingList{}) +} diff --git a/apis/ydb/v1alpha1/zz_databaseserverless_terraformed.go b/apis/ydb/v1alpha1/zz_databaseserverless_terraformed.go new file mode 100755 index 0000000..b84005e --- /dev/null +++ b/apis/ydb/v1alpha1/zz_databaseserverless_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this DatabaseServerless
+func (mg *DatabaseServerless) GetTerraformResourceType() string {
+	return "yandex_ydb_database_serverless"
+}
+
+// GetConnectionDetailsMapping for this DatabaseServerless
+func (tr *DatabaseServerless) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this DatabaseServerless
+func (tr *DatabaseServerless) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this DatabaseServerless
+func (tr *DatabaseServerless) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this DatabaseServerless
+func (tr *DatabaseServerless) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this DatabaseServerless
+func (tr *DatabaseServerless) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DatabaseServerless
+func (tr *DatabaseServerless) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DatabaseServerless
+func (tr *DatabaseServerless) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
}
+
+// GetMergedParameters of this DatabaseServerless
+func (tr *DatabaseServerless) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false; we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DatabaseServerless using its observed tfState.
+// returns True if there are any spec changes for the resource.
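Conceptually, late-initialization copies an observed value into the spec only when the user left that field unset; for a single field, the generated traversal below behaves like this hand-written sketch (illustrative only, not part of the file):

// lateInitLocationID returns true when it fills spec.LocationID from the
// observed Terraform state; a field the user already set is never touched.
func lateInitLocationID(spec, observed *DatabaseServerlessParameters) bool {
	if spec.LocationID != nil || observed.LocationID == nil {
		return false
	}
	v := *observed.LocationID
	spec.LocationID = &v
	return true
}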
+func (tr *DatabaseServerless) LateInitialize(attrs []byte) (bool, error) { + params := &DatabaseServerlessParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DatabaseServerless) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ydb/v1alpha1/zz_databaseserverless_types.go b/apis/ydb/v1alpha1/zz_databaseserverless_types.go new file mode 100755 index 0000000..b19716c --- /dev/null +++ b/apis/ydb/v1alpha1/zz_databaseserverless_types.go @@ -0,0 +1,235 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DatabaseServerlessInitParameters struct { + + // Inhibits deletion of the database. Can be either true or false + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A description for the Yandex Database serverless cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the Yandex Database serverless cluster belongs to. It will be deduced from provider configuration if not set explicitly. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Database serverless cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Location ID for the Yandex Database serverless cluster. + LocationID *string `json:"locationId,omitempty" tf:"location_id,omitempty"` + + // Name for the Yandex Database serverless cluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + ServerlessDatabase []ServerlessDatabaseInitParameters `json:"serverlessDatabase,omitempty" tf:"serverless_database,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type DatabaseServerlessObservation struct { + + // The Yandex Database serverless cluster creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Full database path of the Yandex Database serverless cluster. Useful for SDK configuration. + DatabasePath *string `json:"databasePath,omitempty" tf:"database_path,omitempty"` + + // Inhibits deletion of the database. Can be either true or false + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A description for the Yandex Database serverless cluster. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Document API endpoint of the Yandex Database serverless cluster. + DocumentAPIEndpoint *string `json:"documentApiEndpoint,omitempty" tf:"document_api_endpoint,omitempty"` + + // ID of the folder that the Yandex Database serverless cluster belongs to. It will be deduced from provider configuration if not set explicitly. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // ID of the Yandex Database serverless cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Database serverless cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Location ID for the Yandex Database serverless cluster. + LocationID *string `json:"locationId,omitempty" tf:"location_id,omitempty"` + + // Name for the Yandex Database serverless cluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + ServerlessDatabase []ServerlessDatabaseObservation `json:"serverlessDatabase,omitempty" tf:"serverless_database,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` + + // Status of the Yandex Database serverless cluster. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Whether TLS is enabled for the Yandex Database serverless cluster. Useful for SDK configuration. + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` + + // API endpoint of the Yandex Database serverless cluster. Useful for SDK configuration. + YdbAPIEndpoint *string `json:"ydbApiEndpoint,omitempty" tf:"ydb_api_endpoint,omitempty"` + + // Full endpoint of the Yandex Database serverless cluster. + YdbFullEndpoint *string `json:"ydbFullEndpoint,omitempty" tf:"ydb_full_endpoint,omitempty"` +} + +type DatabaseServerlessParameters struct { + + // Inhibits deletion of the database. Can be either true or false + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A description for the Yandex Database serverless cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder that the Yandex Database serverless cluster belongs to. It will be deduced from provider configuration if not set explicitly. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Database serverless cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Location ID for the Yandex Database serverless cluster. + // +kubebuilder:validation:Optional + LocationID *string `json:"locationId,omitempty" tf:"location_id,omitempty"` + + // Name for the Yandex Database serverless cluster. 
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ServerlessDatabase []ServerlessDatabaseParameters `json:"serverlessDatabase,omitempty" tf:"serverless_database,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"`
+}
+
+type ServerlessDatabaseInitParameters struct {
+	EnableThrottlingRcuLimit *bool `json:"enableThrottlingRcuLimit,omitempty" tf:"enable_throttling_rcu_limit,omitempty"`
+
+	ProvisionedRcuLimit *float64 `json:"provisionedRcuLimit,omitempty" tf:"provisioned_rcu_limit,omitempty"`
+
+	StorageSizeLimit *float64 `json:"storageSizeLimit,omitempty" tf:"storage_size_limit,omitempty"`
+
+	ThrottlingRcuLimit *float64 `json:"throttlingRcuLimit,omitempty" tf:"throttling_rcu_limit,omitempty"`
+}
+
+type ServerlessDatabaseObservation struct {
+	EnableThrottlingRcuLimit *bool `json:"enableThrottlingRcuLimit,omitempty" tf:"enable_throttling_rcu_limit,omitempty"`
+
+	ProvisionedRcuLimit *float64 `json:"provisionedRcuLimit,omitempty" tf:"provisioned_rcu_limit,omitempty"`
+
+	StorageSizeLimit *float64 `json:"storageSizeLimit,omitempty" tf:"storage_size_limit,omitempty"`
+
+	ThrottlingRcuLimit *float64 `json:"throttlingRcuLimit,omitempty" tf:"throttling_rcu_limit,omitempty"`
+}
+
+type ServerlessDatabaseParameters struct {
+
+	// +kubebuilder:validation:Optional
+	EnableThrottlingRcuLimit *bool `json:"enableThrottlingRcuLimit,omitempty" tf:"enable_throttling_rcu_limit,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ProvisionedRcuLimit *float64 `json:"provisionedRcuLimit,omitempty" tf:"provisioned_rcu_limit,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	StorageSizeLimit *float64 `json:"storageSizeLimit,omitempty" tf:"storage_size_limit,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ThrottlingRcuLimit *float64 `json:"throttlingRcuLimit,omitempty" tf:"throttling_rcu_limit,omitempty"`
+}
+
+// DatabaseServerlessSpec defines the desired state of DatabaseServerless
+type DatabaseServerlessSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     DatabaseServerlessParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller, like an autoscaler, is
+	// managing them.
+	InitProvider DatabaseServerlessInitParameters `json:"initProvider,omitempty"`
+}
+
+// DatabaseServerlessStatus defines the observed state of DatabaseServerless.
+type DatabaseServerlessStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        DatabaseServerlessObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// DatabaseServerless is the Schema for the DatabaseServerlesss API. Manages a Yandex Database serverless cluster.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DatabaseServerless struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec DatabaseServerlessSpec `json:"spec"` + Status DatabaseServerlessStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DatabaseServerlessList contains a list of DatabaseServerlesss +type DatabaseServerlessList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DatabaseServerless `json:"items"` +} + +// Repository type metadata. +var ( + DatabaseServerless_Kind = "DatabaseServerless" + DatabaseServerless_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DatabaseServerless_Kind}.String() + DatabaseServerless_KindAPIVersion = DatabaseServerless_Kind + "." + CRDGroupVersion.String() + DatabaseServerless_GroupVersionKind = CRDGroupVersion.WithKind(DatabaseServerless_Kind) +) + +func init() { + SchemeBuilder.Register(&DatabaseServerless{}, &DatabaseServerlessList{}) +} diff --git a/apis/ydb/v1alpha1/zz_generated.conversion_hubs.go b/apis/ydb/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..fe9b73f --- /dev/null +++ b/apis/ydb/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,24 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *DatabaseDedicated) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DatabaseIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DatabaseServerless) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Table) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TableChangefeed) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TableIndex) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Topic) Hub() {} diff --git a/apis/ydb/v1alpha1/zz_generated.deepcopy.go b/apis/ydb/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..7b3c872 --- /dev/null +++ b/apis/ydb/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3645 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
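Everything from here to the end of zz_generated.deepcopy.go is controller-gen output following a single idiom: DeepCopyInto value-copies the receiver, then re-allocates every pointer, slice, and map field so the copy shares no memory with the original, and DeepCopy wraps that into an allocate-and-return helper. A hand-written miniature of the same pattern (the toy type is illustrative, not part of the provider):

    package main

    import "fmt"

    // toy stands in for any generated *InitParameters/*Observation type.
    type toy struct {
    	Name *string
    	Tags []*string
    }

    // DeepCopyInto mirrors the generated idiom: value-copy first, then
    // re-allocate every reference field so nothing is shared.
    func (in *toy) DeepCopyInto(out *toy) {
    	*out = *in
    	if in.Name != nil {
    		in, out := &in.Name, &out.Name
    		*out = new(string)
    		**out = **in
    	}
    	if in.Tags != nil {
    		in, out := &in.Tags, &out.Tags
    		*out = make([]*string, len(*in))
    		for i := range *in {
    			if (*in)[i] != nil {
    				in, out := &(*in)[i], &(*out)[i]
    				*out = new(string)
    				**out = **in
    			}
    		}
    	}
    }

    func main() {
    	n := "a"
    	src := toy{Name: &n}
    	var dst toy
    	src.DeepCopyInto(&dst)
    	*dst.Name = "b"
    	fmt.Println(*src.Name, *dst.Name) // a b -- mutation does not leak back
    }

The no-aliasing guarantee matters because controllers mutate copies of objects served from a shared informer cache; a shared pointer would let a change to one copy leak into every other.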
+func (in *ColumnInitParameters) DeepCopyInto(out *ColumnInitParameters) { + *out = *in + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotNull != nil { + in, out := &in.NotNull, &out.NotNull + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnInitParameters. +func (in *ColumnInitParameters) DeepCopy() *ColumnInitParameters { + if in == nil { + return nil + } + out := new(ColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnObservation) DeepCopyInto(out *ColumnObservation) { + *out = *in + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotNull != nil { + in, out := &in.NotNull, &out.NotNull + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnObservation. +func (in *ColumnObservation) DeepCopy() *ColumnObservation { + if in == nil { + return nil + } + out := new(ColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnParameters) DeepCopyInto(out *ColumnParameters) { + *out = *in + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotNull != nil { + in, out := &in.NotNull, &out.NotNull + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnParameters. +func (in *ColumnParameters) DeepCopy() *ColumnParameters { + if in == nil { + return nil + } + out := new(ColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerInitParameters) DeepCopyInto(out *ConsumerInitParameters) { + *out = *in + if in.Important != nil { + in, out := &in.Important, &out.Important + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartingMessageTimestampMs != nil { + in, out := &in.StartingMessageTimestampMs, &out.StartingMessageTimestampMs + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerInitParameters. 
+func (in *ConsumerInitParameters) DeepCopy() *ConsumerInitParameters { + if in == nil { + return nil + } + out := new(ConsumerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerObservation) DeepCopyInto(out *ConsumerObservation) { + *out = *in + if in.Important != nil { + in, out := &in.Important, &out.Important + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartingMessageTimestampMs != nil { + in, out := &in.StartingMessageTimestampMs, &out.StartingMessageTimestampMs + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerObservation. +func (in *ConsumerObservation) DeepCopy() *ConsumerObservation { + if in == nil { + return nil + } + out := new(ConsumerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerParameters) DeepCopyInto(out *ConsumerParameters) { + *out = *in + if in.Important != nil { + in, out := &in.Important, &out.Important + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartingMessageTimestampMs != nil { + in, out := &in.StartingMessageTimestampMs, &out.StartingMessageTimestampMs + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerParameters. +func (in *ConsumerParameters) DeepCopy() *ConsumerParameters { + if in == nil { + return nil + } + out := new(ConsumerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDedicated) DeepCopyInto(out *DatabaseDedicated) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicated. +func (in *DatabaseDedicated) DeepCopy() *DatabaseDedicated { + if in == nil { + return nil + } + out := new(DatabaseDedicated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseDedicated) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseDedicatedInitParameters) DeepCopyInto(out *DatabaseDedicatedInitParameters) { + *out = *in + if in.AssignPublicIps != nil { + in, out := &in.AssignPublicIps, &out.AssignPublicIps + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocationID != nil { + in, out := &in.LocationID, &out.LocationID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = make([]StorageConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicatedInitParameters. 
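The Labels copy just above shows the one wrinkle in the idiom: map values are themselves pointers and may legitimately be nil, so the generated loop checks each value before allocating instead of blindly dereferencing. The same logic in isolation (ptr and the label values are invented for the example):

    package main

    import "fmt"

    func ptr(s string) *string { return &s }

    func main() {
    	// Label values are *string and may be nil; the copy must preserve that.
    	in := map[string]*string{"team": ptr("db"), "unset": nil}
    	out := make(map[string]*string, len(in))
    	for k, v := range in {
    		if v == nil {
    			out[k] = nil
    			continue
    		}
    		nv := *v // fresh allocation, no aliasing with the source map
    		out[k] = &nv
    	}
    	fmt.Println(*out["team"], out["unset"] == nil) // db true
    }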
+func (in *DatabaseDedicatedInitParameters) DeepCopy() *DatabaseDedicatedInitParameters { + if in == nil { + return nil + } + out := new(DatabaseDedicatedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDedicatedList) DeepCopyInto(out *DatabaseDedicatedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseDedicated, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicatedList. +func (in *DatabaseDedicatedList) DeepCopy() *DatabaseDedicatedList { + if in == nil { + return nil + } + out := new(DatabaseDedicatedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseDedicatedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDedicatedObservation) DeepCopyInto(out *DatabaseDedicatedObservation) { + *out = *in + if in.AssignPublicIps != nil { + in, out := &in.AssignPublicIps, &out.AssignPublicIps + *out = new(bool) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DatabasePath != nil { + in, out := &in.DatabasePath, &out.DatabasePath + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocationID != nil { + in, out := &in.LocationID, &out.LocationID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + 
*out = new(string) + **out = **in + } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = make([]StorageConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TLSEnabled != nil { + in, out := &in.TLSEnabled, &out.TLSEnabled + *out = new(bool) + **out = **in + } + if in.YdbAPIEndpoint != nil { + in, out := &in.YdbAPIEndpoint, &out.YdbAPIEndpoint + *out = new(string) + **out = **in + } + if in.YdbFullEndpoint != nil { + in, out := &in.YdbFullEndpoint, &out.YdbFullEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicatedObservation. +func (in *DatabaseDedicatedObservation) DeepCopy() *DatabaseDedicatedObservation { + if in == nil { + return nil + } + out := new(DatabaseDedicatedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDedicatedParameters) DeepCopyInto(out *DatabaseDedicatedParameters) { + *out = *in + if in.AssignPublicIps != nil { + in, out := &in.AssignPublicIps, &out.AssignPublicIps + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]LocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocationID != nil { + in, out := &in.LocationID, &out.LocationID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.NetworkIDRef != nil { + in, out := &in.NetworkIDRef, &out.NetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkIDSelector != nil { + in, out := &in.NetworkIDSelector, &out.NetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = make([]StorageConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicatedParameters. +func (in *DatabaseDedicatedParameters) DeepCopy() *DatabaseDedicatedParameters { + if in == nil { + return nil + } + out := new(DatabaseDedicatedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDedicatedSpec) DeepCopyInto(out *DatabaseDedicatedSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicatedSpec. +func (in *DatabaseDedicatedSpec) DeepCopy() *DatabaseDedicatedSpec { + if in == nil { + return nil + } + out := new(DatabaseDedicatedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDedicatedStatus) DeepCopyInto(out *DatabaseDedicatedStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDedicatedStatus. +func (in *DatabaseDedicatedStatus) DeepCopy() *DatabaseDedicatedStatus { + if in == nil { + return nil + } + out := new(DatabaseDedicatedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseIAMBinding) DeepCopyInto(out *DatabaseIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBinding. +func (in *DatabaseIAMBinding) DeepCopy() *DatabaseIAMBinding { + if in == nil { + return nil + } + out := new(DatabaseIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseIAMBindingInitParameters) DeepCopyInto(out *DatabaseIAMBindingInitParameters) { + *out = *in + if in.DatabaseID != nil { + in, out := &in.DatabaseID, &out.DatabaseID + *out = new(string) + **out = **in + } + if in.DatabaseIDRef != nil { + in, out := &in.DatabaseIDRef, &out.DatabaseIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseIDSelector != nil { + in, out := &in.DatabaseIDSelector, &out.DatabaseIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBindingInitParameters. +func (in *DatabaseIAMBindingInitParameters) DeepCopy() *DatabaseIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(DatabaseIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseIAMBindingList) DeepCopyInto(out *DatabaseIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBindingList. +func (in *DatabaseIAMBindingList) DeepCopy() *DatabaseIAMBindingList { + if in == nil { + return nil + } + out := new(DatabaseIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseIAMBindingObservation) DeepCopyInto(out *DatabaseIAMBindingObservation) { + *out = *in + if in.DatabaseID != nil { + in, out := &in.DatabaseID, &out.DatabaseID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBindingObservation. +func (in *DatabaseIAMBindingObservation) DeepCopy() *DatabaseIAMBindingObservation { + if in == nil { + return nil + } + out := new(DatabaseIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseIAMBindingParameters) DeepCopyInto(out *DatabaseIAMBindingParameters) { + *out = *in + if in.DatabaseID != nil { + in, out := &in.DatabaseID, &out.DatabaseID + *out = new(string) + **out = **in + } + if in.DatabaseIDRef != nil { + in, out := &in.DatabaseIDRef, &out.DatabaseIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseIDSelector != nil { + in, out := &in.DatabaseIDSelector, &out.DatabaseIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBindingParameters. +func (in *DatabaseIAMBindingParameters) DeepCopy() *DatabaseIAMBindingParameters { + if in == nil { + return nil + } + out := new(DatabaseIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseIAMBindingSpec) DeepCopyInto(out *DatabaseIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBindingSpec. 
+func (in *DatabaseIAMBindingSpec) DeepCopy() *DatabaseIAMBindingSpec { + if in == nil { + return nil + } + out := new(DatabaseIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseIAMBindingStatus) DeepCopyInto(out *DatabaseIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIAMBindingStatus. +func (in *DatabaseIAMBindingStatus) DeepCopy() *DatabaseIAMBindingStatus { + if in == nil { + return nil + } + out := new(DatabaseIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerless) DeepCopyInto(out *DatabaseServerless) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerless. +func (in *DatabaseServerless) DeepCopy() *DatabaseServerless { + if in == nil { + return nil + } + out := new(DatabaseServerless) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseServerless) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerlessInitParameters) DeepCopyInto(out *DatabaseServerlessInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LocationID != nil { + in, out := &in.LocationID, &out.LocationID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServerlessDatabase != nil { + in, out := &in.ServerlessDatabase, &out.ServerlessDatabase + *out = make([]ServerlessDatabaseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerlessInitParameters. 
+func (in *DatabaseServerlessInitParameters) DeepCopy() *DatabaseServerlessInitParameters { + if in == nil { + return nil + } + out := new(DatabaseServerlessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerlessList) DeepCopyInto(out *DatabaseServerlessList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseServerless, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerlessList. +func (in *DatabaseServerlessList) DeepCopy() *DatabaseServerlessList { + if in == nil { + return nil + } + out := new(DatabaseServerlessList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseServerlessList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerlessObservation) DeepCopyInto(out *DatabaseServerlessObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DatabasePath != nil { + in, out := &in.DatabasePath, &out.DatabasePath + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentAPIEndpoint != nil { + in, out := &in.DocumentAPIEndpoint, &out.DocumentAPIEndpoint + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LocationID != nil { + in, out := &in.LocationID, &out.LocationID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServerlessDatabase != nil { + in, out := &in.ServerlessDatabase, &out.ServerlessDatabase + *out = make([]ServerlessDatabaseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TLSEnabled != nil { + in, out := &in.TLSEnabled, &out.TLSEnabled + *out = new(bool) + **out = **in + } + if in.YdbAPIEndpoint != nil { + in, out := &in.YdbAPIEndpoint, &out.YdbAPIEndpoint + *out = new(string) + **out = **in + } + if in.YdbFullEndpoint != nil { + in, out := &in.YdbFullEndpoint, &out.YdbFullEndpoint + *out 
= new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerlessObservation. +func (in *DatabaseServerlessObservation) DeepCopy() *DatabaseServerlessObservation { + if in == nil { + return nil + } + out := new(DatabaseServerlessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerlessParameters) DeepCopyInto(out *DatabaseServerlessParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LocationID != nil { + in, out := &in.LocationID, &out.LocationID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServerlessDatabase != nil { + in, out := &in.ServerlessDatabase, &out.ServerlessDatabase + *out = make([]ServerlessDatabaseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerlessParameters. +func (in *DatabaseServerlessParameters) DeepCopy() *DatabaseServerlessParameters { + if in == nil { + return nil + } + out := new(DatabaseServerlessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerlessSpec) DeepCopyInto(out *DatabaseServerlessSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerlessSpec. +func (in *DatabaseServerlessSpec) DeepCopy() *DatabaseServerlessSpec { + if in == nil { + return nil + } + out := new(DatabaseServerlessSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseServerlessStatus) DeepCopyInto(out *DatabaseServerlessStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseServerlessStatus. 
+func (in *DatabaseServerlessStatus) DeepCopy() *DatabaseServerlessStatus { + if in == nil { + return nil + } + out := new(DatabaseServerlessStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FamilyInitParameters) DeepCopyInto(out *FamilyInitParameters) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FamilyInitParameters. +func (in *FamilyInitParameters) DeepCopy() *FamilyInitParameters { + if in == nil { + return nil + } + out := new(FamilyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FamilyObservation) DeepCopyInto(out *FamilyObservation) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FamilyObservation. +func (in *FamilyObservation) DeepCopy() *FamilyObservation { + if in == nil { + return nil + } + out := new(FamilyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FamilyParameters) DeepCopyInto(out *FamilyParameters) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FamilyParameters. +func (in *FamilyParameters) DeepCopy() *FamilyParameters { + if in == nil { + return nil + } + out := new(FamilyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedScaleInitParameters) DeepCopyInto(out *FixedScaleInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleInitParameters. +func (in *FixedScaleInitParameters) DeepCopy() *FixedScaleInitParameters { + if in == nil { + return nil + } + out := new(FixedScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FixedScaleObservation) DeepCopyInto(out *FixedScaleObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleObservation. +func (in *FixedScaleObservation) DeepCopy() *FixedScaleObservation { + if in == nil { + return nil + } + out := new(FixedScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedScaleParameters) DeepCopyInto(out *FixedScaleParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleParameters. +func (in *FixedScaleParameters) DeepCopy() *FixedScaleParameters { + if in == nil { + return nil + } + out := new(FixedScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationInitParameters) DeepCopyInto(out *LocationInitParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = make([]RegionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationInitParameters. +func (in *LocationInitParameters) DeepCopy() *LocationInitParameters { + if in == nil { + return nil + } + out := new(LocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationObservation) DeepCopyInto(out *LocationObservation) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = make([]RegionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationObservation. +func (in *LocationObservation) DeepCopy() *LocationObservation { + if in == nil { + return nil + } + out := new(LocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationParameters) DeepCopyInto(out *LocationParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = make([]RegionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationParameters. +func (in *LocationParameters) DeepCopy() *LocationParameters { + if in == nil { + return nil + } + out := new(LocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartitionAtKeysInitParameters) DeepCopyInto(out *PartitionAtKeysInitParameters) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionAtKeysInitParameters. +func (in *PartitionAtKeysInitParameters) DeepCopy() *PartitionAtKeysInitParameters { + if in == nil { + return nil + } + out := new(PartitionAtKeysInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionAtKeysObservation) DeepCopyInto(out *PartitionAtKeysObservation) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionAtKeysObservation. +func (in *PartitionAtKeysObservation) DeepCopy() *PartitionAtKeysObservation { + if in == nil { + return nil + } + out := new(PartitionAtKeysObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionAtKeysParameters) DeepCopyInto(out *PartitionAtKeysParameters) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionAtKeysParameters. +func (in *PartitionAtKeysParameters) DeepCopy() *PartitionAtKeysParameters { + if in == nil { + return nil + } + out := new(PartitionAtKeysParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
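PartitionAtKeys.Keys is a []*string, so the generated loop allocates a new string per non-nil element and deliberately leaves nil entries nil. The same logic as a small free function (deepCopyStrPtrs is an illustrative helper, not part of the generated API):

package main

import "fmt"

func deepCopyStrPtrs(in []*string) []*string {
	if in == nil {
		return nil
	}
	out := make([]*string, len(in))
	for i := range in {
		if in[i] != nil {
			out[i] = new(string)
			*out[i] = *in[i] // copy the pointee; nil entries stay nil
		}
	}
	return out
}

func main() {
	k := "100"
	keys := []*string{&k, nil}
	cp := deepCopyStrPtrs(keys)
	*keys[0] = "200"
	fmt.Println(*cp[0], cp[1] == nil) // "100 true"
}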
+func (in *PartitioningSettingsInitParameters) DeepCopyInto(out *PartitioningSettingsInitParameters) { + *out = *in + if in.AutoPartitioningByLoad != nil { + in, out := &in.AutoPartitioningByLoad, &out.AutoPartitioningByLoad + *out = new(bool) + **out = **in + } + if in.AutoPartitioningBySizeEnabled != nil { + in, out := &in.AutoPartitioningBySizeEnabled, &out.AutoPartitioningBySizeEnabled + *out = new(bool) + **out = **in + } + if in.AutoPartitioningMaxPartitionsCount != nil { + in, out := &in.AutoPartitioningMaxPartitionsCount, &out.AutoPartitioningMaxPartitionsCount + *out = new(float64) + **out = **in + } + if in.AutoPartitioningMinPartitionsCount != nil { + in, out := &in.AutoPartitioningMinPartitionsCount, &out.AutoPartitioningMinPartitionsCount + *out = new(float64) + **out = **in + } + if in.AutoPartitioningPartitionSizeMb != nil { + in, out := &in.AutoPartitioningPartitionSizeMb, &out.AutoPartitioningPartitionSizeMb + *out = new(float64) + **out = **in + } + if in.PartitionAtKeys != nil { + in, out := &in.PartitionAtKeys, &out.PartitionAtKeys + *out = make([]PartitionAtKeysInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UniformPartitions != nil { + in, out := &in.UniformPartitions, &out.UniformPartitions + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitioningSettingsInitParameters. +func (in *PartitioningSettingsInitParameters) DeepCopy() *PartitioningSettingsInitParameters { + if in == nil { + return nil + } + out := new(PartitioningSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitioningSettingsObservation) DeepCopyInto(out *PartitioningSettingsObservation) { + *out = *in + if in.AutoPartitioningByLoad != nil { + in, out := &in.AutoPartitioningByLoad, &out.AutoPartitioningByLoad + *out = new(bool) + **out = **in + } + if in.AutoPartitioningBySizeEnabled != nil { + in, out := &in.AutoPartitioningBySizeEnabled, &out.AutoPartitioningBySizeEnabled + *out = new(bool) + **out = **in + } + if in.AutoPartitioningMaxPartitionsCount != nil { + in, out := &in.AutoPartitioningMaxPartitionsCount, &out.AutoPartitioningMaxPartitionsCount + *out = new(float64) + **out = **in + } + if in.AutoPartitioningMinPartitionsCount != nil { + in, out := &in.AutoPartitioningMinPartitionsCount, &out.AutoPartitioningMinPartitionsCount + *out = new(float64) + **out = **in + } + if in.AutoPartitioningPartitionSizeMb != nil { + in, out := &in.AutoPartitioningPartitionSizeMb, &out.AutoPartitioningPartitionSizeMb + *out = new(float64) + **out = **in + } + if in.PartitionAtKeys != nil { + in, out := &in.PartitionAtKeys, &out.PartitionAtKeys + *out = make([]PartitionAtKeysObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UniformPartitions != nil { + in, out := &in.UniformPartitions, &out.UniformPartitions + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitioningSettingsObservation. +func (in *PartitioningSettingsObservation) DeepCopy() *PartitioningSettingsObservation { + if in == nil { + return nil + } + out := new(PartitioningSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PartitioningSettingsParameters) DeepCopyInto(out *PartitioningSettingsParameters) { + *out = *in + if in.AutoPartitioningByLoad != nil { + in, out := &in.AutoPartitioningByLoad, &out.AutoPartitioningByLoad + *out = new(bool) + **out = **in + } + if in.AutoPartitioningBySizeEnabled != nil { + in, out := &in.AutoPartitioningBySizeEnabled, &out.AutoPartitioningBySizeEnabled + *out = new(bool) + **out = **in + } + if in.AutoPartitioningMaxPartitionsCount != nil { + in, out := &in.AutoPartitioningMaxPartitionsCount, &out.AutoPartitioningMaxPartitionsCount + *out = new(float64) + **out = **in + } + if in.AutoPartitioningMinPartitionsCount != nil { + in, out := &in.AutoPartitioningMinPartitionsCount, &out.AutoPartitioningMinPartitionsCount + *out = new(float64) + **out = **in + } + if in.AutoPartitioningPartitionSizeMb != nil { + in, out := &in.AutoPartitioningPartitionSizeMb, &out.AutoPartitioningPartitionSizeMb + *out = new(float64) + **out = **in + } + if in.PartitionAtKeys != nil { + in, out := &in.PartitionAtKeys, &out.PartitionAtKeys + *out = make([]PartitionAtKeysParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UniformPartitions != nil { + in, out := &in.UniformPartitions, &out.UniformPartitions + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitioningSettingsParameters. +func (in *PartitioningSettingsParameters) DeepCopy() *PartitioningSettingsParameters { + if in == nil { + return nil + } + out := new(PartitioningSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionInitParameters) DeepCopyInto(out *RegionInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionInitParameters. +func (in *RegionInitParameters) DeepCopy() *RegionInitParameters { + if in == nil { + return nil + } + out := new(RegionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionObservation) DeepCopyInto(out *RegionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionObservation. +func (in *RegionObservation) DeepCopy() *RegionObservation { + if in == nil { + return nil + } + out := new(RegionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionParameters) DeepCopyInto(out *RegionParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionParameters. +func (in *RegionParameters) DeepCopy() *RegionParameters { + if in == nil { + return nil + } + out := new(RegionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
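Throughout these bodies the generator re-declares in and out as pointers to the current field (the repeated `in, out := &in.X, &out.X` lines), so a single template — allocate, then `**out = **in` — works uniformly for every field type. A compact sketch of the shadowing idiom with a stand-in Settings type:

package main

import "fmt"

type Settings struct {
	UniformPartitions *float64
}

func (in *Settings) DeepCopyInto(out *Settings) {
	*out = *in
	if in.UniformPartitions != nil {
		// Shadowing: from here on, in and out are **float64 pointing at the
		// field, so the copy template is identical regardless of field type.
		in, out := &in.UniformPartitions, &out.UniformPartitions
		*out = new(float64)
		**out = **in
	}
}

func main() {
	n := 4.0
	a := Settings{UniformPartitions: &n}
	var b Settings
	a.DeepCopyInto(&b)
	*a.UniformPartitions = 8
	fmt.Println(*b.UniformPartitions) // 4 — the copy is detached
}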
+func (in *ScalePolicyInitParameters) DeepCopyInto(out *ScalePolicyInitParameters) { + *out = *in + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyInitParameters. +func (in *ScalePolicyInitParameters) DeepCopy() *ScalePolicyInitParameters { + if in == nil { + return nil + } + out := new(ScalePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyObservation) DeepCopyInto(out *ScalePolicyObservation) { + *out = *in + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyObservation. +func (in *ScalePolicyObservation) DeepCopy() *ScalePolicyObservation { + if in == nil { + return nil + } + out := new(ScalePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalePolicyParameters) DeepCopyInto(out *ScalePolicyParameters) { + *out = *in + if in.FixedScale != nil { + in, out := &in.FixedScale, &out.FixedScale + *out = make([]FixedScaleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalePolicyParameters. +func (in *ScalePolicyParameters) DeepCopy() *ScalePolicyParameters { + if in == nil { + return nil + } + out := new(ScalePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessDatabaseInitParameters) DeepCopyInto(out *ServerlessDatabaseInitParameters) { + *out = *in + if in.EnableThrottlingRcuLimit != nil { + in, out := &in.EnableThrottlingRcuLimit, &out.EnableThrottlingRcuLimit + *out = new(bool) + **out = **in + } + if in.ProvisionedRcuLimit != nil { + in, out := &in.ProvisionedRcuLimit, &out.ProvisionedRcuLimit + *out = new(float64) + **out = **in + } + if in.StorageSizeLimit != nil { + in, out := &in.StorageSizeLimit, &out.StorageSizeLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRcuLimit != nil { + in, out := &in.ThrottlingRcuLimit, &out.ThrottlingRcuLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessDatabaseInitParameters. +func (in *ServerlessDatabaseInitParameters) DeepCopy() *ServerlessDatabaseInitParameters { + if in == nil { + return nil + } + out := new(ServerlessDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessDatabaseObservation) DeepCopyInto(out *ServerlessDatabaseObservation) { + *out = *in + if in.EnableThrottlingRcuLimit != nil { + in, out := &in.EnableThrottlingRcuLimit, &out.EnableThrottlingRcuLimit + *out = new(bool) + **out = **in + } + if in.ProvisionedRcuLimit != nil { + in, out := &in.ProvisionedRcuLimit, &out.ProvisionedRcuLimit + *out = new(float64) + **out = **in + } + if in.StorageSizeLimit != nil { + in, out := &in.StorageSizeLimit, &out.StorageSizeLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRcuLimit != nil { + in, out := &in.ThrottlingRcuLimit, &out.ThrottlingRcuLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessDatabaseObservation. +func (in *ServerlessDatabaseObservation) DeepCopy() *ServerlessDatabaseObservation { + if in == nil { + return nil + } + out := new(ServerlessDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessDatabaseParameters) DeepCopyInto(out *ServerlessDatabaseParameters) { + *out = *in + if in.EnableThrottlingRcuLimit != nil { + in, out := &in.EnableThrottlingRcuLimit, &out.EnableThrottlingRcuLimit + *out = new(bool) + **out = **in + } + if in.ProvisionedRcuLimit != nil { + in, out := &in.ProvisionedRcuLimit, &out.ProvisionedRcuLimit + *out = new(float64) + **out = **in + } + if in.StorageSizeLimit != nil { + in, out := &in.StorageSizeLimit, &out.StorageSizeLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRcuLimit != nil { + in, out := &in.ThrottlingRcuLimit, &out.ThrottlingRcuLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessDatabaseParameters. +func (in *ServerlessDatabaseParameters) DeepCopy() *ServerlessDatabaseParameters { + if in == nil { + return nil + } + out := new(ServerlessDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConfigInitParameters) DeepCopyInto(out *StorageConfigInitParameters) { + *out = *in + if in.GroupCount != nil { + in, out := &in.GroupCount, &out.GroupCount + *out = new(float64) + **out = **in + } + if in.StorageTypeID != nil { + in, out := &in.StorageTypeID, &out.StorageTypeID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigInitParameters. +func (in *StorageConfigInitParameters) DeepCopy() *StorageConfigInitParameters { + if in == nil { + return nil + } + out := new(StorageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConfigObservation) DeepCopyInto(out *StorageConfigObservation) { + *out = *in + if in.GroupCount != nil { + in, out := &in.GroupCount, &out.GroupCount + *out = new(float64) + **out = **in + } + if in.StorageTypeID != nil { + in, out := &in.StorageTypeID, &out.StorageTypeID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigObservation. 
+func (in *StorageConfigObservation) DeepCopy() *StorageConfigObservation { + if in == nil { + return nil + } + out := new(StorageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConfigParameters) DeepCopyInto(out *StorageConfigParameters) { + *out = *in + if in.GroupCount != nil { + in, out := &in.GroupCount, &out.GroupCount + *out = new(float64) + **out = **in + } + if in.StorageTypeID != nil { + in, out := &in.StorageTypeID, &out.StorageTypeID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigParameters. +func (in *StorageConfigParameters) DeepCopy() *StorageConfigParameters { + if in == nil { + return nil + } + out := new(StorageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLInitParameters) DeepCopyInto(out *TTLInitParameters) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } + if in.ExpireInterval != nil { + in, out := &in.ExpireInterval, &out.ExpireInterval + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLInitParameters. +func (in *TTLInitParameters) DeepCopy() *TTLInitParameters { + if in == nil { + return nil + } + out := new(TTLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLObservation) DeepCopyInto(out *TTLObservation) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } + if in.ExpireInterval != nil { + in, out := &in.ExpireInterval, &out.ExpireInterval + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLObservation. +func (in *TTLObservation) DeepCopy() *TTLObservation { + if in == nil { + return nil + } + out := new(TTLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLParameters) DeepCopyInto(out *TTLParameters) { + *out = *in + if in.ColumnName != nil { + in, out := &in.ColumnName, &out.ColumnName + *out = new(string) + **out = **in + } + if in.ExpireInterval != nil { + in, out := &in.ExpireInterval, &out.ExpireInterval + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLParameters. +func (in *TTLParameters) DeepCopy() *TTLParameters { + if in == nil { + return nil + } + out := new(TTLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableChangefeed) DeepCopyInto(out *TableChangefeed) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeed. +func (in *TableChangefeed) DeepCopy() *TableChangefeed { + if in == nil { + return nil + } + out := new(TableChangefeed) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableChangefeed) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableChangefeedInitParameters) DeepCopyInto(out *TableChangefeedInitParameters) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = make([]ConsumerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(string) + **out = **in + } + if in.TableID != nil { + in, out := &in.TableID, &out.TableID + *out = new(string) + **out = **in + } + if in.TablePath != nil { + in, out := &in.TablePath, &out.TablePath + *out = new(string) + **out = **in + } + if in.VirtualTimestamps != nil { + in, out := &in.VirtualTimestamps, &out.VirtualTimestamps + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeedInitParameters. +func (in *TableChangefeedInitParameters) DeepCopy() *TableChangefeedInitParameters { + if in == nil { + return nil + } + out := new(TableChangefeedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
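Table and TableChangefeed are top-level kinds: TypeMeta is copied by plain assignment (it holds no pointers), while ObjectMeta, Spec, and Status are delegated to their own deep copies. DeepCopyObject then adapts DeepCopy to the runtime.Object interface; the extra nil check exists so a nil receiver comes back as an untyped nil interface rather than a typed nil wrapped in a non-nil interface. A self-contained sketch, where Object stands in for apimachinery's runtime.Object:

package main

import "fmt"

// Object stands in for k8s.io/apimachinery's runtime.Object interface.
type Object interface{ DeepCopyObject() Object }

type Table struct{ Name string }

func (in *Table) DeepCopy() *Table {
	if in == nil {
		return nil
	}
	out := new(Table)
	*out = *in
	return out
}

// The generated wrapper: returning in.DeepCopy() directly would box a
// typed nil into the interface, so the result is checked first.
func (in *Table) DeepCopyObject() Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

func main() {
	var t *Table
	fmt.Println(t.DeepCopyObject() == nil) // true: no typed-nil leak
}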
+func (in *TableChangefeedList) DeepCopyInto(out *TableChangefeedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TableChangefeed, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeedList. +func (in *TableChangefeedList) DeepCopy() *TableChangefeedList { + if in == nil { + return nil + } + out := new(TableChangefeedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableChangefeedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableChangefeedObservation) DeepCopyInto(out *TableChangefeedObservation) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = make([]ConsumerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(string) + **out = **in + } + if in.TableID != nil { + in, out := &in.TableID, &out.TableID + *out = new(string) + **out = **in + } + if in.TablePath != nil { + in, out := &in.TablePath, &out.TablePath + *out = new(string) + **out = **in + } + if in.VirtualTimestamps != nil { + in, out := &in.VirtualTimestamps, &out.VirtualTimestamps + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeedObservation. +func (in *TableChangefeedObservation) DeepCopy() *TableChangefeedObservation { + if in == nil { + return nil + } + out := new(TableChangefeedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
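Note the shape every DeepCopyInto here shares: `*out = *in` first performs a shallow struct copy, settling all scalar fields in one step, and the if-blocks afterwards re-point each reference-typed field at fresh memory. Until the make-and-loop runs, out.Items in a list copy still aliases in.Items. A stand-alone illustration with stand-in Item/List types:

package main

import "fmt"

type Item struct{ Name *string }

func (in *Item) DeepCopyInto(out *Item) {
	*out = *in // shallow first...
	if in.Name != nil {
		out.Name = new(string) // ...then detach each pointer field
		*out.Name = *in.Name
	}
}

type List struct{ Items []Item }

func (in *List) DeepCopyInto(out *List) {
	*out = *in // out.Items aliases in.Items at this point
	if in.Items != nil {
		out.Items = make([]Item, len(in.Items))
		for i := range in.Items {
			in.Items[i].DeepCopyInto(&out.Items[i])
		}
	}
}

func main() {
	n := "feed-a"
	l := List{Items: []Item{{Name: &n}}}
	var snap List
	l.DeepCopyInto(&snap)
	*l.Items[0].Name = "feed-b"
	fmt.Println(*snap.Items[0].Name) // "feed-a": the snapshot is isolated
}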
+func (in *TableChangefeedParameters) DeepCopyInto(out *TableChangefeedParameters) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = make([]ConsumerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(string) + **out = **in + } + if in.TableID != nil { + in, out := &in.TableID, &out.TableID + *out = new(string) + **out = **in + } + if in.TablePath != nil { + in, out := &in.TablePath, &out.TablePath + *out = new(string) + **out = **in + } + if in.VirtualTimestamps != nil { + in, out := &in.VirtualTimestamps, &out.VirtualTimestamps + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeedParameters. +func (in *TableChangefeedParameters) DeepCopy() *TableChangefeedParameters { + if in == nil { + return nil + } + out := new(TableChangefeedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableChangefeedSpec) DeepCopyInto(out *TableChangefeedSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeedSpec. +func (in *TableChangefeedSpec) DeepCopy() *TableChangefeedSpec { + if in == nil { + return nil + } + out := new(TableChangefeedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableChangefeedStatus) DeepCopyInto(out *TableChangefeedStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableChangefeedStatus. +func (in *TableChangefeedStatus) DeepCopy() *TableChangefeedStatus { + if in == nil { + return nil + } + out := new(TableChangefeedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableIndex) DeepCopyInto(out *TableIndex) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndex. +func (in *TableIndex) DeepCopy() *TableIndex { + if in == nil { + return nil + } + out := new(TableIndex) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TableIndex) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableIndexInitParameters) DeepCopyInto(out *TableIndexInitParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Cover != nil { + in, out := &in.Cover, &out.Cover + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TableID != nil { + in, out := &in.TableID, &out.TableID + *out = new(string) + **out = **in + } + if in.TablePath != nil { + in, out := &in.TablePath, &out.TablePath + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndexInitParameters. +func (in *TableIndexInitParameters) DeepCopy() *TableIndexInitParameters { + if in == nil { + return nil + } + out := new(TableIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableIndexList) DeepCopyInto(out *TableIndexList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TableIndex, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndexList. +func (in *TableIndexList) DeepCopy() *TableIndexList { + if in == nil { + return nil + } + out := new(TableIndexList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableIndexList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableIndexObservation) DeepCopyInto(out *TableIndexObservation) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Cover != nil { + in, out := &in.Cover, &out.Cover + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TableID != nil { + in, out := &in.TableID, &out.TableID + *out = new(string) + **out = **in + } + if in.TablePath != nil { + in, out := &in.TablePath, &out.TablePath + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndexObservation. +func (in *TableIndexObservation) DeepCopy() *TableIndexObservation { + if in == nil { + return nil + } + out := new(TableIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableIndexParameters) DeepCopyInto(out *TableIndexParameters) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Cover != nil { + in, out := &in.Cover, &out.Cover + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TableID != nil { + in, out := &in.TableID, &out.TableID + *out = new(string) + **out = **in + } + if in.TablePath != nil { + in, out := &in.TablePath, &out.TablePath + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndexParameters. +func (in *TableIndexParameters) DeepCopy() *TableIndexParameters { + if in == nil { + return nil + } + out := new(TableIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableIndexSpec) DeepCopyInto(out *TableIndexSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndexSpec. 
+func (in *TableIndexSpec) DeepCopy() *TableIndexSpec { + if in == nil { + return nil + } + out := new(TableIndexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableIndexStatus) DeepCopyInto(out *TableIndexStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIndexStatus. +func (in *TableIndexStatus) DeepCopy() *TableIndexStatus { + if in == nil { + return nil + } + out := new(TableIndexStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.ConnectionStringRef != nil { + in, out := &in.ConnectionStringRef, &out.ConnectionStringRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionStringSelector != nil { + in, out := &in.ConnectionStringSelector, &out.ConnectionStringSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = make([]FamilyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyBloomFilter != nil { + in, out := &in.KeyBloomFilter, &out.KeyBloomFilter + *out = new(bool) + **out = **in + } + if in.PartitioningSettings != nil { + in, out := &in.PartitioningSettings, &out.PartitioningSettings + *out = make([]PartitioningSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PrimaryKey != nil { + in, out := &in.PrimaryKey, &out.PrimaryKey + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReadReplicasSettings != nil { + in, out := &in.ReadReplicasSettings, &out.ReadReplicasSettings + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = make([]TTLInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableInitParameters. +func (in *TableInitParameters) DeepCopy() *TableInitParameters { + if in == nil { + return nil + } + out := new(TableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
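TableInitParameters.Attributes is a map[string]*string: a deep copy means a new map plus a new string per non-nil value, with nil values preserved as nil. The generated inVal/outVal locals give the pointer-copy template an addressable intermediate (map entries themselves are not addressable), or so I read the generator's intent. Equivalent logic as an illustrative helper:

package main

import "fmt"

func deepCopyAttrs(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil // nil values are kept, not dropped
			continue
		}
		// Copy through a local, mirroring the generated inVal/outVal dance.
		v := *val
		out[key] = &v
	}
	return out
}

func main() {
	v := "ydb"
	src := map[string]*string{"engine": &v, "unset": nil}
	cp := deepCopyAttrs(src)
	*src["engine"] = "other"
	fmt.Println(*cp["engine"], cp["unset"] == nil) // "ydb true"
}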
+func (in *TableList) DeepCopyInto(out *TableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Table, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableList. +func (in *TableList) DeepCopy() *TableList { + if in == nil { + return nil + } + out := new(TableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableObservation) DeepCopyInto(out *TableObservation) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = make([]FamilyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyBloomFilter != nil { + in, out := &in.KeyBloomFilter, &out.KeyBloomFilter + *out = new(bool) + **out = **in + } + if in.PartitioningSettings != nil { + in, out := &in.PartitioningSettings, &out.PartitioningSettings + *out = make([]PartitioningSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PrimaryKey != nil { + in, out := &in.PrimaryKey, &out.PrimaryKey + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReadReplicasSettings != nil { + in, out := &in.ReadReplicasSettings, &out.ReadReplicasSettings + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = make([]TTLObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableObservation. +func (in *TableObservation) DeepCopy() *TableObservation { + if in == nil { + return nil + } + out := new(TableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableParameters) DeepCopyInto(out *TableParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.ConnectionStringRef != nil { + in, out := &in.ConnectionStringRef, &out.ConnectionStringRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionStringSelector != nil { + in, out := &in.ConnectionStringSelector, &out.ConnectionStringSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = make([]FamilyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyBloomFilter != nil { + in, out := &in.KeyBloomFilter, &out.KeyBloomFilter + *out = new(bool) + **out = **in + } + if in.PartitioningSettings != nil { + in, out := &in.PartitioningSettings, &out.PartitioningSettings + *out = make([]PartitioningSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PrimaryKey != nil { + in, out := &in.PrimaryKey, &out.PrimaryKey + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReadReplicasSettings != nil { + in, out := &in.ReadReplicasSettings, &out.ReadReplicasSettings + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = make([]TTLParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableParameters. +func (in *TableParameters) DeepCopy() *TableParameters { + if in == nil { + return nil + } + out := new(TableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableSpec) DeepCopyInto(out *TableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSpec. +func (in *TableSpec) DeepCopy() *TableSpec { + if in == nil { + return nil + } + out := new(TableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableStatus) DeepCopyInto(out *TableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableStatus. 
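TableSpec composes crossplane-runtime's ResourceSpec with two parameter blocks: ForProvider (the desired state) and InitProvider (parameters honored only at creation time, per upjet's convention as I read it); its DeepCopyInto simply delegates to each. A hedged construction sketch inside this package, using only field names visible in this diff — strPtr is a local helper and the values are made-up placeholders:

// strPtr is a local illustrative helper, not part of the generated API.
func strPtr(s string) *string { return &s }

// exampleTableSpec sketches the spec composition with placeholder values.
func exampleTableSpec() TableSpec {
	return TableSpec{
		ForProvider: TableParameters{
			Path:             strPtr("sample/table"),
			ConnectionString: strPtr("grpcs://ydb.example.invalid:2135/?database=/sample"),
			PrimaryKey:       []*string{strPtr("id")},
		},
	}
}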
+func (in *TableStatus) DeepCopy() *TableStatus { + if in == nil { + return nil + } + out := new(TableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Topic) DeepCopyInto(out *Topic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topic. +func (in *Topic) DeepCopy() *Topic { + if in == nil { + return nil + } + out := new(Topic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Topic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicConsumerInitParameters) DeepCopyInto(out *TopicConsumerInitParameters) { + *out = *in + if in.Important != nil { + in, out := &in.Important, &out.Important + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartingMessageTimestampMs != nil { + in, out := &in.StartingMessageTimestampMs, &out.StartingMessageTimestampMs + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicConsumerInitParameters. +func (in *TopicConsumerInitParameters) DeepCopy() *TopicConsumerInitParameters { + if in == nil { + return nil + } + out := new(TopicConsumerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicConsumerObservation) DeepCopyInto(out *TopicConsumerObservation) { + *out = *in + if in.Important != nil { + in, out := &in.Important, &out.Important + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartingMessageTimestampMs != nil { + in, out := &in.StartingMessageTimestampMs, &out.StartingMessageTimestampMs + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicConsumerObservation. +func (in *TopicConsumerObservation) DeepCopy() *TopicConsumerObservation { + if in == nil { + return nil + } + out := new(TopicConsumerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicConsumerParameters) DeepCopyInto(out *TopicConsumerParameters) { + *out = *in + if in.Important != nil { + in, out := &in.Important, &out.Important + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartingMessageTimestampMs != nil { + in, out := &in.StartingMessageTimestampMs, &out.StartingMessageTimestampMs + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicConsumerParameters. +func (in *TopicConsumerParameters) DeepCopy() *TopicConsumerParameters { + if in == nil { + return nil + } + out := new(TopicConsumerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInitParameters) DeepCopyInto(out *TopicInitParameters) { + *out = *in + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = make([]TopicConsumerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DatabaseEndpoint != nil { + in, out := &in.DatabaseEndpoint, &out.DatabaseEndpoint + *out = new(string) + **out = **in + } + if in.DatabaseEndpointRef != nil { + in, out := &in.DatabaseEndpointRef, &out.DatabaseEndpointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseEndpointSelector != nil { + in, out := &in.DatabaseEndpointSelector, &out.DatabaseEndpointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MeteringMode != nil { + in, out := &in.MeteringMode, &out.MeteringMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionWriteSpeedKbps != nil { + in, out := &in.PartitionWriteSpeedKbps, &out.PartitionWriteSpeedKbps + *out = new(float64) + **out = **in + } + if in.PartitionsCount != nil { + in, out := &in.PartitionsCount, &out.PartitionsCount + *out = new(float64) + **out = **in + } + if in.RetentionPeriodHours != nil { + in, out := &in.RetentionPeriodHours, &out.RetentionPeriodHours + *out = new(float64) + **out = **in + } + if in.RetentionStorageMb != nil { + in, out := &in.RetentionStorageMb, &out.RetentionStorageMb + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInitParameters. +func (in *TopicInitParameters) DeepCopy() *TopicInitParameters { + if in == nil { + return nil + } + out := new(TopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
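Alongside the plain DatabaseEndpoint value, the generated TopicInitParameters carry a *v1.Reference and a *v1.Selector so the endpoint can be resolved from another managed resource instead of being hard-coded. A hedged sketch, assuming crossplane-runtime's common v1 types; in practice you set one of the pair, not both:

import v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

// Illustrative only: the controller fills DatabaseEndpoint from whichever
// object the reference or selector resolves to.
func exampleTopicInit() TopicInitParameters {
	return TopicInitParameters{
		// Resolve from a specific named database object...
		DatabaseEndpointRef: &v1.Reference{Name: "my-database"},
		// ...or from any object carrying matching labels.
		DatabaseEndpointSelector: &v1.Selector{
			MatchLabels: map[string]string{"app": "ydb"},
		},
	}
}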
+func (in *TopicList) DeepCopyInto(out *TopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Topic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicList. +func (in *TopicList) DeepCopy() *TopicList { + if in == nil { + return nil + } + out := new(TopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicObservation) DeepCopyInto(out *TopicObservation) { + *out = *in + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = make([]TopicConsumerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DatabaseEndpoint != nil { + in, out := &in.DatabaseEndpoint, &out.DatabaseEndpoint + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MeteringMode != nil { + in, out := &in.MeteringMode, &out.MeteringMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionWriteSpeedKbps != nil { + in, out := &in.PartitionWriteSpeedKbps, &out.PartitionWriteSpeedKbps + *out = new(float64) + **out = **in + } + if in.PartitionsCount != nil { + in, out := &in.PartitionsCount, &out.PartitionsCount + *out = new(float64) + **out = **in + } + if in.RetentionPeriodHours != nil { + in, out := &in.RetentionPeriodHours, &out.RetentionPeriodHours + *out = new(float64) + **out = **in + } + if in.RetentionStorageMb != nil { + in, out := &in.RetentionStorageMb, &out.RetentionStorageMb + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicObservation. +func (in *TopicObservation) DeepCopy() *TopicObservation { + if in == nil { + return nil + } + out := new(TopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicParameters) DeepCopyInto(out *TopicParameters) { + *out = *in + if in.Consumer != nil { + in, out := &in.Consumer, &out.Consumer + *out = make([]TopicConsumerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DatabaseEndpoint != nil { + in, out := &in.DatabaseEndpoint, &out.DatabaseEndpoint + *out = new(string) + **out = **in + } + if in.DatabaseEndpointRef != nil { + in, out := &in.DatabaseEndpointRef, &out.DatabaseEndpointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseEndpointSelector != nil { + in, out := &in.DatabaseEndpointSelector, &out.DatabaseEndpointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MeteringMode != nil { + in, out := &in.MeteringMode, &out.MeteringMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionWriteSpeedKbps != nil { + in, out := &in.PartitionWriteSpeedKbps, &out.PartitionWriteSpeedKbps + *out = new(float64) + **out = **in + } + if in.PartitionsCount != nil { + in, out := &in.PartitionsCount, &out.PartitionsCount + *out = new(float64) + **out = **in + } + if in.RetentionPeriodHours != nil { + in, out := &in.RetentionPeriodHours, &out.RetentionPeriodHours + *out = new(float64) + **out = **in + } + if in.RetentionStorageMb != nil { + in, out := &in.RetentionStorageMb, &out.RetentionStorageMb + *out = new(float64) + **out = **in + } + if in.SupportedCodecs != nil { + in, out := &in.SupportedCodecs, &out.SupportedCodecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicParameters. +func (in *TopicParameters) DeepCopy() *TopicParameters { + if in == nil { + return nil + } + out := new(TopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicSpec) DeepCopyInto(out *TopicSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSpec. +func (in *TopicSpec) DeepCopy() *TopicSpec { + if in == nil { + return nil + } + out := new(TopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicStatus) DeepCopyInto(out *TopicStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicStatus. +func (in *TopicStatus) DeepCopy() *TopicStatus { + if in == nil { + return nil + } + out := new(TopicStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ydb/v1alpha1/zz_generated.resolvers.go b/apis/ydb/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..64cbe23 --- /dev/null +++ b/apis/ydb/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,323 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + iam "github.com/tagesjump/provider-upjet-yc/config/iam" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this DatabaseDedicated. +func (mg *DatabaseDedicated) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkIDRef, + Selector: mg.Spec.ForProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkID") + } + mg.Spec.ForProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIdsRefs, + Selector: mg.Spec.ForProvider.SubnetIdsSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIdsRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkIDRef, + Selector: mg.Spec.InitProvider.NetworkIDSelector, + To: reference.To{ + List: &v1alpha11.NetworkList{}, + Managed: &v1alpha11.Network{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.NetworkID") + } + mg.Spec.InitProvider.NetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIdsRefs, + Selector: mg.Spec.InitProvider.SubnetIdsSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this DatabaseIAMBinding. +func (mg *DatabaseIAMBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseIDRef, + Selector: mg.Spec.ForProvider.DatabaseIDSelector, + To: reference.To{ + List: &DatabaseServerlessList{}, + Managed: &DatabaseServerless{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseID") + } + mg.Spec.ForProvider.DatabaseID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.ForProvider.ServiceAccountRef, + Selector: mg.Spec.ForProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Members") + } + mg.Spec.ForProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ServiceAccountRef = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DatabaseID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DatabaseIDRef, + Selector: mg.Spec.InitProvider.DatabaseIDSelector, + To: reference.To{ + List: &DatabaseServerlessList{}, + Managed: &DatabaseServerless{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DatabaseID") + } + mg.Spec.InitProvider.DatabaseID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DatabaseIDRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Members), + Extract: iam.ServiceAccountRefValue(), + References: mg.Spec.InitProvider.ServiceAccountRef, + Selector: mg.Spec.InitProvider.ServiceAccountSelector, + To: reference.To{ + List: &v1alpha12.ServiceAccountList{}, + Managed: &v1alpha12.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Members") + } + mg.Spec.InitProvider.Members = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ServiceAccountRef = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences 
of this DatabaseServerless. +func (mg *DatabaseServerless) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Table. +func (mg *Table) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConnectionString), + Extract: resource.ExtractParamPath("ydb_full_endpoint", true), + Reference: mg.Spec.ForProvider.ConnectionStringRef, + Selector: mg.Spec.ForProvider.ConnectionStringSelector, + To: reference.To{ + List: &DatabaseServerlessList{}, + Managed: &DatabaseServerless{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConnectionString") + } + mg.Spec.ForProvider.ConnectionString = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConnectionStringRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConnectionString), + Extract: resource.ExtractParamPath("ydb_full_endpoint", true), + Reference: mg.Spec.InitProvider.ConnectionStringRef, + Selector: mg.Spec.InitProvider.ConnectionStringSelector, + To: reference.To{ + List: &DatabaseServerlessList{}, + Managed: &DatabaseServerless{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConnectionString") + } + mg.Spec.InitProvider.ConnectionString = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConnectionStringRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Topic. 
+func (mg *Topic) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseEndpoint), + Extract: resource.ExtractParamPath("ydb_full_endpoint", true), + Reference: mg.Spec.ForProvider.DatabaseEndpointRef, + Selector: mg.Spec.ForProvider.DatabaseEndpointSelector, + To: reference.To{ + List: &DatabaseServerlessList{}, + Managed: &DatabaseServerless{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseEndpoint") + } + mg.Spec.ForProvider.DatabaseEndpoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseEndpointRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DatabaseEndpoint), + Extract: resource.ExtractParamPath("ydb_full_endpoint", true), + Reference: mg.Spec.InitProvider.DatabaseEndpointRef, + Selector: mg.Spec.InitProvider.DatabaseEndpointSelector, + To: reference.To{ + List: &DatabaseServerlessList{}, + Managed: &DatabaseServerless{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DatabaseEndpoint") + } + mg.Spec.InitProvider.DatabaseEndpoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DatabaseEndpointRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/ydb/v1alpha1/zz_groupversion_info.go b/apis/ydb/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..fb319a2 --- /dev/null +++ b/apis/ydb/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ydb.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ydb.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ydb/v1alpha1/zz_table_terraformed.go b/apis/ydb/v1alpha1/zz_table_terraformed.go new file mode 100755 index 0000000..f180f6c --- /dev/null +++ b/apis/ydb/v1alpha1/zz_table_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
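+
+// A minimal usage sketch (illustrative, not generated code): GetMergedParameters
+// merges spec.initProvider into spec.forProvider with overwriting disabled, so a
+// field set in both places keeps its forProvider value. Assuming a Table tr whose
+// forProvider sets path and whose initProvider sets key_bloom_filter:
+//
+//	params, err := tr.GetMergedParameters(true)
+//	// on success, params["path"] comes from forProvider and
+//	// params["key_bloom_filter"] is filled in from initProvider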
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Table +func (mg *Table) GetTerraformResourceType() string { + return "yandex_ydb_table" +} + +// GetConnectionDetailsMapping for this Table +func (tr *Table) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Table +func (tr *Table) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Table +func (tr *Table) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Table +func (tr *Table) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Table +func (tr *Table) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Table +func (tr *Table) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Table +func (tr *Table) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Table +func (tr *Table) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Table using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *Table) LateInitialize(attrs []byte) (bool, error) { + params := &TableParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Table) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ydb/v1alpha1/zz_table_types.go b/apis/ydb/v1alpha1/zz_table_types.go new file mode 100755 index 0000000..5287673 --- /dev/null +++ b/apis/ydb/v1alpha1/zz_table_types.go @@ -0,0 +1,402 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ColumnInitParameters struct { + + // Column group + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Column name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A column cannot have the NULL data type. ( Default: false ) + NotNull *bool `json:"notNull,omitempty" tf:"not_null,omitempty"` + + // Column data type. YQL data types are used. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnObservation struct { + + // Column group + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Column name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A column cannot have the NULL data type. ( Default: false ) + NotNull *bool `json:"notNull,omitempty" tf:"not_null,omitempty"` + + // Column data type. YQL data types are used. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnParameters struct { + + // Column group + // +kubebuilder:validation:Optional + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Column name + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A column cannot have the NULL data type. ( Default: false ) + // +kubebuilder:validation:Optional + NotNull *bool `json:"notNull,omitempty" tf:"not_null,omitempty"` + + // Column data type. YQL data types are used. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FamilyInitParameters struct { + + // Data codec (acceptable values: off, lz4). + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // Type of storage device for column data in this group (acceptable values: ssd, rot (from HDD spindle rotation)). + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Column family name + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type FamilyObservation struct { + + // Data codec (acceptable values: off, lz4). + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // Type of storage device for column data in this group (acceptable values: ssd, rot (from HDD spindle rotation)). + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Column family name + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type FamilyParameters struct { + + // Data codec (acceptable values: off, lz4). 
+ // +kubebuilder:validation:Optional + Compression *string `json:"compression" tf:"compression,omitempty"` + + // Type of storage device for column data in this group (acceptable values: ssd, rot (from HDD spindle rotation)). + // +kubebuilder:validation:Optional + Data *string `json:"data" tf:"data,omitempty"` + + // Column family name + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type PartitionAtKeysInitParameters struct { + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` +} + +type PartitionAtKeysObservation struct { + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` +} + +type PartitionAtKeysParameters struct { + + // +kubebuilder:validation:Optional + Keys []*string `json:"keys" tf:"keys,omitempty"` +} + +type PartitioningSettingsInitParameters struct { + AutoPartitioningByLoad *bool `json:"autoPartitioningByLoad,omitempty" tf:"auto_partitioning_by_load,omitempty"` + + AutoPartitioningBySizeEnabled *bool `json:"autoPartitioningBySizeEnabled,omitempty" tf:"auto_partitioning_by_size_enabled,omitempty"` + + AutoPartitioningMaxPartitionsCount *float64 `json:"autoPartitioningMaxPartitionsCount,omitempty" tf:"auto_partitioning_max_partitions_count,omitempty"` + + AutoPartitioningMinPartitionsCount *float64 `json:"autoPartitioningMinPartitionsCount,omitempty" tf:"auto_partitioning_min_partitions_count,omitempty"` + + AutoPartitioningPartitionSizeMb *float64 `json:"autoPartitioningPartitionSizeMb,omitempty" tf:"auto_partitioning_partition_size_mb,omitempty"` + + PartitionAtKeys []PartitionAtKeysInitParameters `json:"partitionAtKeys,omitempty" tf:"partition_at_keys,omitempty"` + + UniformPartitions *float64 `json:"uniformPartitions,omitempty" tf:"uniform_partitions,omitempty"` +} + +type PartitioningSettingsObservation struct { + AutoPartitioningByLoad *bool `json:"autoPartitioningByLoad,omitempty" tf:"auto_partitioning_by_load,omitempty"` + + AutoPartitioningBySizeEnabled *bool `json:"autoPartitioningBySizeEnabled,omitempty" tf:"auto_partitioning_by_size_enabled,omitempty"` + + AutoPartitioningMaxPartitionsCount *float64 `json:"autoPartitioningMaxPartitionsCount,omitempty" tf:"auto_partitioning_max_partitions_count,omitempty"` + + AutoPartitioningMinPartitionsCount *float64 `json:"autoPartitioningMinPartitionsCount,omitempty" tf:"auto_partitioning_min_partitions_count,omitempty"` + + AutoPartitioningPartitionSizeMb *float64 `json:"autoPartitioningPartitionSizeMb,omitempty" tf:"auto_partitioning_partition_size_mb,omitempty"` + + PartitionAtKeys []PartitionAtKeysObservation `json:"partitionAtKeys,omitempty" tf:"partition_at_keys,omitempty"` + + UniformPartitions *float64 `json:"uniformPartitions,omitempty" tf:"uniform_partitions,omitempty"` +} + +type PartitioningSettingsParameters struct { + + // +kubebuilder:validation:Optional + AutoPartitioningByLoad *bool `json:"autoPartitioningByLoad,omitempty" tf:"auto_partitioning_by_load,omitempty"` + + // +kubebuilder:validation:Optional + AutoPartitioningBySizeEnabled *bool `json:"autoPartitioningBySizeEnabled,omitempty" tf:"auto_partitioning_by_size_enabled,omitempty"` + + // +kubebuilder:validation:Optional + AutoPartitioningMaxPartitionsCount *float64 `json:"autoPartitioningMaxPartitionsCount,omitempty" tf:"auto_partitioning_max_partitions_count,omitempty"` + + // +kubebuilder:validation:Optional + AutoPartitioningMinPartitionsCount *float64 `json:"autoPartitioningMinPartitionsCount,omitempty" tf:"auto_partitioning_min_partitions_count,omitempty"` + + // 
+kubebuilder:validation:Optional + AutoPartitioningPartitionSizeMb *float64 `json:"autoPartitioningPartitionSizeMb,omitempty" tf:"auto_partitioning_partition_size_mb,omitempty"` + + // +kubebuilder:validation:Optional + PartitionAtKeys []PartitionAtKeysParameters `json:"partitionAtKeys,omitempty" tf:"partition_at_keys,omitempty"` + + // +kubebuilder:validation:Optional + UniformPartitions *float64 `json:"uniformPartitions,omitempty" tf:"uniform_partitions,omitempty"` +} + +type TTLInitParameters struct { + + // Column name for TTL + ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"` + + // Interval in the ISO 8601 format + ExpireInterval *string `json:"expireInterval,omitempty" tf:"expire_interval,omitempty"` + + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type TTLObservation struct { + + // Column name for TTL + ColumnName *string `json:"columnName,omitempty" tf:"column_name,omitempty"` + + // Interval in the ISO 8601 format + ExpireInterval *string `json:"expireInterval,omitempty" tf:"expire_interval,omitempty"` + + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type TTLParameters struct { + + // Column name for TTL + // +kubebuilder:validation:Optional + ColumnName *string `json:"columnName" tf:"column_name,omitempty"` + + // Interval in the ISO 8601 format + // +kubebuilder:validation:Optional + ExpireInterval *string `json:"expireInterval" tf:"expire_interval,omitempty"` + + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type TableInitParameters struct { + + // A map of table attributes. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // A list of column configuration options. The structure is documented below. + Column []ColumnInitParameters `json:"column,omitempty" tf:"column,omitempty"` + + // Connection string for database. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1.DatabaseServerless + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("ydb_full_endpoint",true) + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Reference to a DatabaseServerless in ydb to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringRef *v1.Reference `json:"connectionStringRef,omitempty" tf:"-"` + + // Selector for a DatabaseServerless in ydb to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringSelector *v1.Selector `json:"connectionStringSelector,omitempty" tf:"-"` + + // A list of column group configuration options. The structure is documented below. + Family []FamilyInitParameters `json:"family,omitempty" tf:"family,omitempty"` + + // Use the Bloom filter for the primary key + KeyBloomFilter *bool `json:"keyBloomFilter,omitempty" tf:"key_bloom_filter,omitempty"` + + // Table partitioning settings. The structure is documented below. + PartitioningSettings []PartitioningSettingsInitParameters `json:"partitioningSettings,omitempty" tf:"partitioning_settings,omitempty"` + + // Table path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A list of table columns to be used as primary key.
+ PrimaryKey []*string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` + + // Read replication settings + ReadReplicasSettings *string `json:"readReplicasSettings,omitempty" tf:"read_replicas_settings,omitempty"` + + // TTL settings. The structure is documented below. + TTL []TTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type TableObservation struct { + + // A map of table attributes. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // A list of column configuration options. The structure is documented below. + Column []ColumnObservation `json:"column,omitempty" tf:"column,omitempty"` + + // Connection string for database. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // A list of column group configuration options. The structure is documented below. + Family []FamilyObservation `json:"family,omitempty" tf:"family,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Use the Bloom filter for the primary key + KeyBloomFilter *bool `json:"keyBloomFilter,omitempty" tf:"key_bloom_filter,omitempty"` + + // Table partitioning settings. The structure is documented below. + PartitioningSettings []PartitioningSettingsObservation `json:"partitioningSettings,omitempty" tf:"partitioning_settings,omitempty"` + + // Table path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A list of table columns to be used as primary key. + PrimaryKey []*string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` + + // Read replication settings + ReadReplicasSettings *string `json:"readReplicasSettings,omitempty" tf:"read_replicas_settings,omitempty"` + + // TTL settings. The structure is documented below. + TTL []TTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type TableParameters struct { + + // A map of table attributes. + // +kubebuilder:validation:Optional + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // A list of column configuration options. The structure is documented below. + // +kubebuilder:validation:Optional + Column []ColumnParameters `json:"column,omitempty" tf:"column,omitempty"` + + // Connection string for database. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1.DatabaseServerless + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("ydb_full_endpoint",true) + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Reference to a DatabaseServerless in ydb to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringRef *v1.Reference `json:"connectionStringRef,omitempty" tf:"-"` + + // Selector for a DatabaseServerless in ydb to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringSelector *v1.Selector `json:"connectionStringSelector,omitempty" tf:"-"` + + // A list of column group configuration options. The structure is documented below.
+ // +kubebuilder:validation:Optional + Family []FamilyParameters `json:"family,omitempty" tf:"family,omitempty"` + + // Use the Bloom filter for the primary key + // +kubebuilder:validation:Optional + KeyBloomFilter *bool `json:"keyBloomFilter,omitempty" tf:"key_bloom_filter,omitempty"` + + // Table partitioning settings. The structure is documented below. + // +kubebuilder:validation:Optional + PartitioningSettings []PartitioningSettingsParameters `json:"partitioningSettings,omitempty" tf:"partitioning_settings,omitempty"` + + // Table path. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A list of table columns to be used as primary key. + // +kubebuilder:validation:Optional + PrimaryKey []*string `json:"primaryKey,omitempty" tf:"primary_key,omitempty"` + + // Read replication settings + // +kubebuilder:validation:Optional + ReadReplicasSettings *string `json:"readReplicasSettings,omitempty" tf:"read_replicas_settings,omitempty"` + + // TTL settings. The structure is documented below. + // +kubebuilder:validation:Optional + TTL []TTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +// TableSpec defines the desired state of Table +type TableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TableInitParameters `json:"initProvider,omitempty"` +} + +// TableStatus defines the observed state of Table. +type TableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Table is the Schema for the Tables API. Manages a Yandex Database table.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Table struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.column) || (has(self.initProvider) && has(self.initProvider.column))",message="spec.forProvider.column is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.path) || (has(self.initProvider) && has(self.initProvider.path))",message="spec.forProvider.path is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.primaryKey) || (has(self.initProvider) && has(self.initProvider.primaryKey))",message="spec.forProvider.primaryKey is a required parameter" + Spec TableSpec `json:"spec"` + Status TableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TableList contains a list of Tables +type TableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Table `json:"items"` +} + +// Repository type metadata. +var ( + Table_Kind = "Table" + Table_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Table_Kind}.String() + Table_KindAPIVersion = Table_Kind + "." + CRDGroupVersion.String() + Table_GroupVersionKind = CRDGroupVersion.WithKind(Table_Kind) +) + +func init() { + SchemeBuilder.Register(&Table{}, &TableList{}) +} diff --git a/apis/ydb/v1alpha1/zz_tablechangefeed_terraformed.go b/apis/ydb/v1alpha1/zz_tablechangefeed_terraformed.go new file mode 100755 index 0000000..21b6111 --- /dev/null +++ b/apis/ydb/v1alpha1/zz_tablechangefeed_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
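+
+// A minimal usage sketch (illustrative, not generated code): LateInitialize fills
+// empty spec.forProvider fields from the observed Terraform state and reports
+// whether the spec changed. Assuming tfState holds the JSON-encoded attributes of
+// an existing changefeed:
+//
+//	changed, err := tr.LateInitialize(tfState)
+//	if err == nil && changed {
+//		// persist the late-initialized spec
+//	}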
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TableChangefeed +func (mg *TableChangefeed) GetTerraformResourceType() string { + return "yandex_ydb_table_changefeed" +} + +// GetConnectionDetailsMapping for this TableChangefeed +func (tr *TableChangefeed) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TableChangefeed +func (tr *TableChangefeed) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TableChangefeed +func (tr *TableChangefeed) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TableChangefeed +func (tr *TableChangefeed) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TableChangefeed +func (tr *TableChangefeed) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TableChangefeed +func (tr *TableChangefeed) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TableChangefeed +func (tr *TableChangefeed) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this TableChangefeed +func (tr *TableChangefeed) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TableChangefeed using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *TableChangefeed) LateInitialize(attrs []byte) (bool, error) { + params := &TableChangefeedParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TableChangefeed) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ydb/v1alpha1/zz_tablechangefeed_types.go b/apis/ydb/v1alpha1/zz_tablechangefeed_types.go new file mode 100755 index 0000000..550a4ce --- /dev/null +++ b/apis/ydb/v1alpha1/zz_tablechangefeed_types.go @@ -0,0 +1,222 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConsumerInitParameters struct { + Important *bool `json:"important,omitempty" tf:"important,omitempty"` + + // : Changefeed name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Timestamp in the UNIX timestamp format, from which the consumer will start reading data + StartingMessageTimestampMs *float64 `json:"startingMessageTimestampMs,omitempty" tf:"starting_message_timestamp_ms,omitempty"` + + // Supported data encodings + // +listType=set + SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"` +} + +type ConsumerObservation struct { + Important *bool `json:"important,omitempty" tf:"important,omitempty"` + + // : Changefeed name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Timestamp in the UNIX timestamp format, from which the consumer will start reading data + StartingMessageTimestampMs *float64 `json:"startingMessageTimestampMs,omitempty" tf:"starting_message_timestamp_ms,omitempty"` + + // Supported data encodings + // +listType=set + SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"` +} + +type ConsumerParameters struct { + + // +kubebuilder:validation:Optional + Important *bool `json:"important,omitempty" tf:"important,omitempty"` + + // : Changefeed name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Timestamp in the UNIX timestamp format, from which the consumer will start reading data + // +kubebuilder:validation:Optional + StartingMessageTimestampMs *float64 `json:"startingMessageTimestampMs,omitempty" tf:"starting_message_timestamp_ms,omitempty"` + + // Supported data encodings + // +kubebuilder:validation:Optional + // +listType=set + SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"` +} + +type TableChangefeedInitParameters struct { + + // Connection string, conflicts with table_id + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Changefeed consumers - named entities for reading data from the topic. + Consumer []ConsumerInitParameters `json:"consumer,omitempty" tf:"consumer,omitempty"` + + // : Changefeed format. Only JSON format is available. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // : Changefeed operating mode. 
The available changefeed operating modes are presented in the documentation. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // : Changefeed name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Time of data retention in the topic, ISO 8601 format + RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` + + // : ID of the table for which we create the changefeed. + TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"` + + // Table path + TablePath *string `json:"tablePath,omitempty" tf:"table_path,omitempty"` + + // Use virtual timestamps + VirtualTimestamps *bool `json:"virtualTimestamps,omitempty" tf:"virtual_timestamps,omitempty"` +} + +type TableChangefeedObservation struct { + + // Connection string, conflicts with table_id + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Changefeed consumers - named entities for reading data from the topic. + Consumer []ConsumerObservation `json:"consumer,omitempty" tf:"consumer,omitempty"` + + // : Changefeed format. Only JSON format is available. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // : Changefeed operating mode. The available changefeed operating modes are presented in the documentation. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // : Changefeed name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Time of data retention in the topic, ISO 8601 format + RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` + + // : ID of the table for which we create the changefeed. + TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"` + + // Table path + TablePath *string `json:"tablePath,omitempty" tf:"table_path,omitempty"` + + // Use virtual timestamps + VirtualTimestamps *bool `json:"virtualTimestamps,omitempty" tf:"virtual_timestamps,omitempty"` +} + +type TableChangefeedParameters struct { + + // Connection string, conflicts with table_id + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Changefeed consumers - named entities for reading data from the topic. + // +kubebuilder:validation:Optional + Consumer []ConsumerParameters `json:"consumer,omitempty" tf:"consumer,omitempty"` + + // : Changefeed format. Only JSON format is available. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // : Changefeed operating mode. The available changefeed operating modes are presented in the documentation. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // : Changefeed name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Time of data retention in the topic, ISO 8601 format + // +kubebuilder:validation:Optional + RetentionPeriod *string `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` + + // : ID of the table for which we create the changefeed. 
+ // +kubebuilder:validation:Optional + TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"` + + // Table path + // +kubebuilder:validation:Optional + TablePath *string `json:"tablePath,omitempty" tf:"table_path,omitempty"` + + // Use virtual timestamps + // +kubebuilder:validation:Optional + VirtualTimestamps *bool `json:"virtualTimestamps,omitempty" tf:"virtual_timestamps,omitempty"` +} + +// TableChangefeedSpec defines the desired state of TableChangefeed +type TableChangefeedSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableChangefeedParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TableChangefeedInitParameters `json:"initProvider,omitempty"` +} + +// TableChangefeedStatus defines the observed state of TableChangefeed. +type TableChangefeedStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableChangefeedObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// TableChangefeed is the Schema for the TableChangefeeds API. Manages a Yandex Database table changefeed.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type TableChangefeed struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.format) || (has(self.initProvider) && has(self.initProvider.format))",message="spec.forProvider.format is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.mode) || (has(self.initProvider) && has(self.initProvider.mode))",message="spec.forProvider.mode is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec TableChangefeedSpec `json:"spec"` + Status TableChangefeedStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TableChangefeedList contains a list of TableChangefeeds +type TableChangefeedList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TableChangefeed `json:"items"` +} + +// Repository type metadata. +var ( + TableChangefeed_Kind = "TableChangefeed" + TableChangefeed_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TableChangefeed_Kind}.String() + TableChangefeed_KindAPIVersion = TableChangefeed_Kind + "." + CRDGroupVersion.String() + TableChangefeed_GroupVersionKind = CRDGroupVersion.WithKind(TableChangefeed_Kind) +) + +func init() { + SchemeBuilder.Register(&TableChangefeed{}, &TableChangefeedList{}) +} diff --git a/apis/ydb/v1alpha1/zz_tableindex_terraformed.go b/apis/ydb/v1alpha1/zz_tableindex_terraformed.go new file mode 100755 index 0000000..6c44a95 --- /dev/null +++ b/apis/ydb/v1alpha1/zz_tableindex_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TableIndex +func (mg *TableIndex) GetTerraformResourceType() string { + return "yandex_ydb_table_index" +} + +// GetConnectionDetailsMapping for this TableIndex +func (tr *TableIndex) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TableIndex +func (tr *TableIndex) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TableIndex +func (tr *TableIndex) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TableIndex +func (tr *TableIndex) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TableIndex +func (tr *TableIndex) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TableIndex +func (tr *TableIndex) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TableIndex +func (tr *TableIndex) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this TableIndex +func (tr *TableIndex) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TableIndex using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *TableIndex) LateInitialize(attrs []byte) (bool, error) { + params := &TableIndexParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TableIndex) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ydb/v1alpha1/zz_tableindex_types.go b/apis/ydb/v1alpha1/zz_tableindex_types.go new file mode 100755 index 0000000..3eb678a --- /dev/null +++ b/apis/ydb/v1alpha1/zz_tableindex_types.go @@ -0,0 +1,156 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TableIndexInitParameters struct { + + // (List of String) + Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` + + // (String) + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // (List of String) + Cover []*string `json:"cover,omitempty" tf:"cover,omitempty"` + + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (String) + TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"` + + // (String) + TablePath *string `json:"tablePath,omitempty" tf:"table_path,omitempty"` + + // (String) + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TableIndexObservation struct { + + // (List of String) + Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` + + // (String) + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // (List of String) + Cover []*string `json:"cover,omitempty" tf:"cover,omitempty"` + + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (String) + TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"` + + // (String) + TablePath *string `json:"tablePath,omitempty" tf:"table_path,omitempty"` + + // (String) + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TableIndexParameters struct { + + // (List of String) + // +kubebuilder:validation:Optional + Columns []*string `json:"columns,omitempty" tf:"columns,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // (List of String) + // +kubebuilder:validation:Optional + Cover []*string `json:"cover,omitempty" tf:"cover,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + TablePath *string `json:"tablePath,omitempty" tf:"table_path,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// TableIndexSpec defines the desired state of TableIndex +type TableIndexSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableIndexParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TableIndexInitParameters `json:"initProvider,omitempty"` +} + +// TableIndexStatus defines the observed state of TableIndex. +type TableIndexStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableIndexObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// TableIndex is the Schema for the TableIndexs API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type TableIndex struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.columns) || (has(self.initProvider) && has(self.initProvider.columns))",message="spec.forProvider.columns is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter"
+	Spec   TableIndexSpec   `json:"spec"`
+	Status TableIndexStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// TableIndexList contains a list of TableIndexes
+type TableIndexList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []TableIndex `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	TableIndex_Kind             = "TableIndex"
+	TableIndex_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: TableIndex_Kind}.String()
+	TableIndex_KindAPIVersion   = TableIndex_Kind + "." + CRDGroupVersion.String()
+	TableIndex_GroupVersionKind = CRDGroupVersion.WithKind(TableIndex_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&TableIndex{}, &TableIndexList{})
+}
diff --git a/apis/ydb/v1alpha1/zz_topic_terraformed.go b/apis/ydb/v1alpha1/zz_topic_terraformed.go
new file mode 100755
index 0000000..9f1d10d
--- /dev/null
+++ b/apis/ydb/v1alpha1/zz_topic_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
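+//
+// The methods in this file adapt Topic to upjet's Terraformed machinery:
+// they round-trip spec and status through the JSON representation of the
+// underlying yandex_ydb_topic Terraform resource.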
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Topic
+func (mg *Topic) GetTerraformResourceType() string {
+	return "yandex_ydb_topic"
+}
+
+// GetConnectionDetailsMapping for this Topic
+func (tr *Topic) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Topic
+func (tr *Topic) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Topic
+func (tr *Topic) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Topic
+func (tr *Topic) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Topic
+func (tr *Topic) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Topic
+func (tr *Topic) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Topic
+func (tr *Topic) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Topic
+func (tr *Topic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Topic using its observed tfState.
+// It returns true if there are any spec changes for the resource.
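+// In outline, the observed state attributes are unmarshaled into a
+// TopicParameters value and the generic late-initializer copies each
+// attribute that is set in state but still nil in spec.forProvider;
+// the zero-value filter configured below keeps zero values of
+// omitempty-tagged fields from being late-initialized.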
+func (tr *Topic) LateInitialize(attrs []byte) (bool, error) {
+	params := &TopicParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Topic) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/ydb/v1alpha1/zz_topic_types.go b/apis/ydb/v1alpha1/zz_topic_types.go
new file mode 100755
index 0000000..c654f9f
--- /dev/null
+++ b/apis/ydb/v1alpha1/zz_topic_types.go
@@ -0,0 +1,249 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type TopicConsumerInitParameters struct {
+
+	// Defines an important consumer. No data will be deleted from the topic until all the important consumers have read it. Value type: boolean, default value: false.
+	Important *bool `json:"important,omitempty" tf:"important,omitempty"`
+
+	// Consumer name. Type: string, required. Default value: "".
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Timestamp in UNIX timestamp format from which the reader will start reading data. Type: integer, optional. Default value: 0.
+	StartingMessageTimestampMs *float64 `json:"startingMessageTimestampMs,omitempty" tf:"starting_message_timestamp_ms,omitempty"`
+
+	// Supported data encodings. Types: array[string]. Default value: ["gzip", "raw", "zstd"].
+	// +listType=set
+	SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
+}
+
+type TopicConsumerObservation struct {
+
+	// Defines an important consumer. No data will be deleted from the topic until all the important consumers have read it. Value type: boolean, default value: false.
+	Important *bool `json:"important,omitempty" tf:"important,omitempty"`
+
+	// Consumer name. Type: string, required. Default value: "".
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Timestamp in UNIX timestamp format from which the reader will start reading data. Type: integer, optional. Default value: 0.
+	StartingMessageTimestampMs *float64 `json:"startingMessageTimestampMs,omitempty" tf:"starting_message_timestamp_ms,omitempty"`
+
+	// Supported data encodings. Types: array[string]. Default value: ["gzip", "raw", "zstd"].
+	// +listType=set
+	SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
+}
+
+type TopicConsumerParameters struct {
+
+	// Defines an important consumer. No data will be deleted from the topic until all the important consumers have read it. Value type: boolean, default value: false.
+	// +kubebuilder:validation:Optional
+	Important *bool `json:"important,omitempty" tf:"important,omitempty"`
+
+	// Consumer name. Type: string, required. Default value: "".
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// Timestamp in UNIX timestamp format from which the reader will start reading data. Type: integer, optional. Default value: 0.
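+	// Per the _ms suffix, the timestamp is expressed in milliseconds since the Unix epoch.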
+ // +kubebuilder:validation:Optional + StartingMessageTimestampMs *float64 `json:"startingMessageTimestampMs,omitempty" tf:"starting_message_timestamp_ms,omitempty"` + + // Supported data encodings. Types: array[string]. Default value: ["gzip", "raw", "zstd"]. + // +kubebuilder:validation:Optional + // +listType=set + SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"` +} + +type TopicInitParameters struct { + + // Topic Readers. Types: array[consumer], optional. Default value: null. + Consumer []TopicConsumerInitParameters `json:"consumer,omitempty" tf:"consumer,omitempty"` + + // YDB database endpoint. Types: string, required. Default value: "". + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1.DatabaseServerless + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("ydb_full_endpoint",true) + DatabaseEndpoint *string `json:"databaseEndpoint,omitempty" tf:"database_endpoint,omitempty"` + + // Reference to a DatabaseServerless in ydb to populate databaseEndpoint. + // +kubebuilder:validation:Optional + DatabaseEndpointRef *v1.Reference `json:"databaseEndpointRef,omitempty" tf:"-"` + + // Selector for a DatabaseServerless in ydb to populate databaseEndpoint. + // +kubebuilder:validation:Optional + DatabaseEndpointSelector *v1.Selector `json:"databaseEndpointSelector,omitempty" tf:"-"` + + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Resource metering mode (reserved_capacity - based on the allocated resources or request_units - based on actual usage). This option applies to topics in serverless databases. Value type: String. + MeteringMode *string `json:"meteringMode,omitempty" tf:"metering_mode,omitempty"` + + // Topic name. Type: string, required. Default value: "". + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Maximum allowed write speed per partition. If a write speed for a given partition exceeds this value, the write speed will be capped. Value type: integer, default value: 1024 (1MB). + PartitionWriteSpeedKbps *float64 `json:"partitionWriteSpeedKbps,omitempty" tf:"partition_write_speed_kbps,omitempty"` + + // Number of partitions. Types: integer, optional. Default value: 2. + PartitionsCount *float64 `json:"partitionsCount,omitempty" tf:"partitions_count,omitempty"` + + RetentionPeriodHours *float64 `json:"retentionPeriodHours,omitempty" tf:"retention_period_hours,omitempty"` + + RetentionStorageMb *float64 `json:"retentionStorageMb,omitempty" tf:"retention_storage_mb,omitempty"` + + // Supported data encodings. Types: array[string]. Default value: ["gzip", "raw", "zstd"]. + // +listType=set + SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"` +} + +type TopicObservation struct { + + // Topic Readers. Types: array[consumer], optional. Default value: null. + Consumer []TopicConsumerObservation `json:"consumer,omitempty" tf:"consumer,omitempty"` + + // YDB database endpoint. Types: string, required. Default value: "". + DatabaseEndpoint *string `json:"databaseEndpoint,omitempty" tf:"database_endpoint,omitempty"` + + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Resource metering mode (reserved_capacity - based on the allocated resources or request_units - based on actual usage). This option applies to topics in serverless databases. Value type: String. 
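+	// One of: "reserved_capacity", "request_units".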
+ MeteringMode *string `json:"meteringMode,omitempty" tf:"metering_mode,omitempty"` + + // Topic name. Type: string, required. Default value: "". + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Maximum allowed write speed per partition. If a write speed for a given partition exceeds this value, the write speed will be capped. Value type: integer, default value: 1024 (1MB). + PartitionWriteSpeedKbps *float64 `json:"partitionWriteSpeedKbps,omitempty" tf:"partition_write_speed_kbps,omitempty"` + + // Number of partitions. Types: integer, optional. Default value: 2. + PartitionsCount *float64 `json:"partitionsCount,omitempty" tf:"partitions_count,omitempty"` + + RetentionPeriodHours *float64 `json:"retentionPeriodHours,omitempty" tf:"retention_period_hours,omitempty"` + + RetentionStorageMb *float64 `json:"retentionStorageMb,omitempty" tf:"retention_storage_mb,omitempty"` + + // Supported data encodings. Types: array[string]. Default value: ["gzip", "raw", "zstd"]. + // +listType=set + SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"` +} + +type TopicParameters struct { + + // Topic Readers. Types: array[consumer], optional. Default value: null. + // +kubebuilder:validation:Optional + Consumer []TopicConsumerParameters `json:"consumer,omitempty" tf:"consumer,omitempty"` + + // YDB database endpoint. Types: string, required. Default value: "". + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1.DatabaseServerless + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("ydb_full_endpoint",true) + // +kubebuilder:validation:Optional + DatabaseEndpoint *string `json:"databaseEndpoint,omitempty" tf:"database_endpoint,omitempty"` + + // Reference to a DatabaseServerless in ydb to populate databaseEndpoint. + // +kubebuilder:validation:Optional + DatabaseEndpointRef *v1.Reference `json:"databaseEndpointRef,omitempty" tf:"-"` + + // Selector for a DatabaseServerless in ydb to populate databaseEndpoint. + // +kubebuilder:validation:Optional + DatabaseEndpointSelector *v1.Selector `json:"databaseEndpointSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Resource metering mode (reserved_capacity - based on the allocated resources or request_units - based on actual usage). This option applies to topics in serverless databases. Value type: String. + // +kubebuilder:validation:Optional + MeteringMode *string `json:"meteringMode,omitempty" tf:"metering_mode,omitempty"` + + // Topic name. Type: string, required. Default value: "". + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Maximum allowed write speed per partition. If a write speed for a given partition exceeds this value, the write speed will be capped. Value type: integer, default value: 1024 (1MB). + // +kubebuilder:validation:Optional + PartitionWriteSpeedKbps *float64 `json:"partitionWriteSpeedKbps,omitempty" tf:"partition_write_speed_kbps,omitempty"` + + // Number of partitions. Types: integer, optional. Default value: 2. 
+	// +kubebuilder:validation:Optional
+	PartitionsCount *float64 `json:"partitionsCount,omitempty" tf:"partitions_count,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	RetentionPeriodHours *float64 `json:"retentionPeriodHours,omitempty" tf:"retention_period_hours,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	RetentionStorageMb *float64 `json:"retentionStorageMb,omitempty" tf:"retention_storage_mb,omitempty"`
+
+	// Supported data encodings. Types: array[string]. Default value: ["gzip", "raw", "zstd"].
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SupportedCodecs []*string `json:"supportedCodecs,omitempty" tf:"supported_codecs,omitempty"`
+}
+
+// TopicSpec defines the desired state of Topic
+type TopicSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     TopicParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider TopicInitParameters `json:"initProvider,omitempty"`
+}
+
+// TopicStatus defines the observed state of Topic.
+type TopicStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        TopicObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Topic is the Schema for the Topics API. Manages a Yandex YDB Topic.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type Topic struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	Spec   TopicSpec   `json:"spec"`
+	Status TopicStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// TopicList contains a list of Topics
+type TopicList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Topic `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	Topic_Kind             = "Topic"
+	Topic_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Topic_Kind}.String()
+	Topic_KindAPIVersion   = Topic_Kind + "." 
+ CRDGroupVersion.String() + Topic_GroupVersionKind = CRDGroupVersion.WithKind(Topic_Kind) +) + +func init() { + SchemeBuilder.Register(&Topic{}, &TopicList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go new file mode 100755 index 0000000..3db7569 --- /dev/null +++ b/apis/zz_register.go @@ -0,0 +1,93 @@ +// Code generated by upjet. DO NOT EDIT. + +// Package apis contains Kubernetes API for the provider. +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" + v1alpha1api "github.com/tagesjump/provider-upjet-yc/apis/api/v1alpha1" + v1alpha1audit "github.com/tagesjump/provider-upjet-yc/apis/audit/v1alpha1" + v1alpha1backup "github.com/tagesjump/provider-upjet-yc/apis/backup/v1alpha1" + v1alpha1billing "github.com/tagesjump/provider-upjet-yc/apis/billing/v1alpha1" + v1alpha1cdn "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" + v1alpha1cm "github.com/tagesjump/provider-upjet-yc/apis/cm/v1alpha1" + v1alpha1compute "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + v1alpha1container "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" + v1alpha1dataproc "github.com/tagesjump/provider-upjet-yc/apis/dataproc/v1alpha1" + v1alpha1datatransfer "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1" + v1alpha1dns "github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1" + v1alpha1function "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1" + v1alpha1iam "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1iot "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1" + v1alpha1kms "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + v1alpha1kubernetes "github.com/tagesjump/provider-upjet-yc/apis/kubernetes/v1alpha1" + v1alpha1lb "github.com/tagesjump/provider-upjet-yc/apis/lb/v1alpha1" + v1alpha1loadtesting "github.com/tagesjump/provider-upjet-yc/apis/loadtesting/v1alpha1" + v1alpha1lockbox "github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1" + v1alpha1logging "github.com/tagesjump/provider-upjet-yc/apis/logging/v1alpha1" + v1alpha1mdb "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" + v1alpha1message "github.com/tagesjump/provider-upjet-yc/apis/message/v1alpha1" + v1alpha1monitoring "github.com/tagesjump/provider-upjet-yc/apis/monitoring/v1alpha1" + v1alpha1organizationmanager "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + v1alpha1resourcemanager "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha1serverless "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1" + v1alpha1smartcaptcha "github.com/tagesjump/provider-upjet-yc/apis/smartcaptcha/v1alpha1" + v1alpha1storage "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1" + v1alpha1sws "github.com/tagesjump/provider-upjet-yc/apis/sws/v1alpha1" + v1alpha1apis "github.com/tagesjump/provider-upjet-yc/apis/v1alpha1" + v1beta1 "github.com/tagesjump/provider-upjet-yc/apis/v1beta1" + v1alpha1vpc "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + v1alpha1yandex "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1" + v1alpha1ydb "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, + v1alpha1.SchemeBuilder.AddToScheme, + v1alpha1api.SchemeBuilder.AddToScheme, + v1alpha1audit.SchemeBuilder.AddToScheme, + 
v1alpha1backup.SchemeBuilder.AddToScheme, + v1alpha1billing.SchemeBuilder.AddToScheme, + v1alpha1cdn.SchemeBuilder.AddToScheme, + v1alpha1cm.SchemeBuilder.AddToScheme, + v1alpha1compute.SchemeBuilder.AddToScheme, + v1alpha1container.SchemeBuilder.AddToScheme, + v1alpha1dataproc.SchemeBuilder.AddToScheme, + v1alpha1datatransfer.SchemeBuilder.AddToScheme, + v1alpha1dns.SchemeBuilder.AddToScheme, + v1alpha1function.SchemeBuilder.AddToScheme, + v1alpha1iam.SchemeBuilder.AddToScheme, + v1alpha1iot.SchemeBuilder.AddToScheme, + v1alpha1kms.SchemeBuilder.AddToScheme, + v1alpha1kubernetes.SchemeBuilder.AddToScheme, + v1alpha1lb.SchemeBuilder.AddToScheme, + v1alpha1loadtesting.SchemeBuilder.AddToScheme, + v1alpha1lockbox.SchemeBuilder.AddToScheme, + v1alpha1logging.SchemeBuilder.AddToScheme, + v1alpha1mdb.SchemeBuilder.AddToScheme, + v1alpha1message.SchemeBuilder.AddToScheme, + v1alpha1monitoring.SchemeBuilder.AddToScheme, + v1alpha1organizationmanager.SchemeBuilder.AddToScheme, + v1alpha1resourcemanager.SchemeBuilder.AddToScheme, + v1alpha1serverless.SchemeBuilder.AddToScheme, + v1alpha1smartcaptcha.SchemeBuilder.AddToScheme, + v1alpha1storage.SchemeBuilder.AddToScheme, + v1alpha1sws.SchemeBuilder.AddToScheme, + v1alpha1apis.SchemeBuilder.AddToScheme, + v1beta1.SchemeBuilder.AddToScheme, + v1alpha1vpc.SchemeBuilder.AddToScheme, + v1alpha1yandex.SchemeBuilder.AddToScheme, + v1alpha1ydb.SchemeBuilder.AddToScheme, + ) +} + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/config/external_name.go b/config/external_name.go index a8e4863..b561330 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -146,8 +146,10 @@ var TerraformPluginSDKExternalNameConfigs = map[string]config.ExternalName{ // resources to be reconciled under the no-fork architecture for this // provider. var TerraformPluginFrameworkExternalNameConfigs = map[string]config.ExternalName{ - "yandex_mdb_mongodb_user": config.IdentifierFromProvider, - "yandex_mdb_mongodb_database": config.IdentifierFromProvider, + "yandex_mdb_mongodb_user": config.IdentifierFromProvider, + "yandex_mdb_mongodb_database": config.IdentifierFromProvider, + // "yandex_mdb_opensearch_cluster": config.IdentifierFromProvider, + // "yandex_airflow_cluster": config.IdentifierFromProvider, "yandex_compute_disk_placement_group_iam_binding": config.IdentifierFromProvider, "yandex_compute_disk_iam_binding": config.IdentifierFromProvider, "yandex_compute_image_iam_binding": config.IdentifierFromProvider, @@ -163,10 +165,7 @@ var TerraformPluginFrameworkExternalNameConfigs = map[string]config.ExternalName // cliReconciledExternalNameConfigs contains all external name configurations // belonging to Terraform resources to be reconciled under the CLI-based // architecture for this provider. 
-var cliReconciledExternalNameConfigs = map[string]config.ExternalName{ - "yandex_mdb_opensearch_cluster": config.IdentifierFromProvider, - // "yandex_airflow_cluster": config.IdentifierFromProvider, -} +var cliReconciledExternalNameConfigs = map[string]config.ExternalName{} // ExternalNameConfigurations applies all external name configs listed in the // table ExternalNameConfigs and sets the version of those resources to v1beta1 diff --git a/examples-generated/alb/v1alpha1/backendgroup.yaml b/examples-generated/alb/v1alpha1/backendgroup.yaml new file mode 100644 index 0000000..e3dee73 --- /dev/null +++ b/examples-generated/alb/v1alpha1/backendgroup.yaml @@ -0,0 +1,30 @@ +apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: BackendGroup +metadata: + annotations: + meta.upbound.io/example-id: alb/v1alpha1/backendgroup + labels: + testing.upbound.io/example-name: test-backend-group + name: test-backend-group +spec: + forProvider: + httpBackend: + - healthcheck: + - httpHealthcheck: + - path: / + interval: 1s + timeout: 1s + http2: "true" + loadBalancingConfig: + - panicThreshold: 50 + name: test-http-backend + port: 8080 + targetGroupIdsRefs: + - name: test-target-group + tls: + - sni: backend-domain.internal + weight: 1 + name: my-backend-group + sessionAffinity: + - connection: + - sourceIp: 127.0.0.1 diff --git a/examples-generated/alb/v1alpha1/httprouter.yaml b/examples-generated/alb/v1alpha1/httprouter.yaml new file mode 100644 index 0000000..54853b7 --- /dev/null +++ b/examples-generated/alb/v1alpha1/httprouter.yaml @@ -0,0 +1,14 @@ +apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: HTTPRouter +metadata: + annotations: + meta.upbound.io/example-id: alb/v1alpha1/httprouter + labels: + testing.upbound.io/example-name: tf-router + name: tf-router +spec: + forProvider: + labels: + - empty-label: s + tf-label: tf-label-value + name: my-http-router diff --git a/examples-generated/alb/v1alpha1/loadbalancer.yaml b/examples-generated/alb/v1alpha1/loadbalancer.yaml new file mode 100644 index 0000000..e354614 --- /dev/null +++ b/examples-generated/alb/v1alpha1/loadbalancer.yaml @@ -0,0 +1,38 @@ +apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: LoadBalancer +metadata: + annotations: + meta.upbound.io/example-id: alb/v1alpha1/loadbalancer + labels: + testing.upbound.io/example-name: test-balancer + name: test-balancer +spec: + forProvider: + allocationPolicy: + - location: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: test-subnet + zoneId: ru-central1-a + listener: + - endpoint: + - address: + - externalIpv4Address: + - {} + ports: + - 8080 + http: + - handler: + - httpRouterIdSelector: + matchLabels: + testing.upbound.io/example-name: test-router + name: my-listener + logOptions: + - discardRule: + - discardPercent: 75 + httpCodeIntervals: + - 2XX + name: my-load-balancer + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: test-network diff --git a/examples-generated/alb/v1alpha1/targetgroup.yaml b/examples-generated/alb/v1alpha1/targetgroup.yaml new file mode 100644 index 0000000..6899ac9 --- /dev/null +++ b/examples-generated/alb/v1alpha1/targetgroup.yaml @@ -0,0 +1,20 @@ +apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: TargetGroup +metadata: + annotations: + meta.upbound.io/example-id: alb/v1alpha1/targetgroup + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + name: my-target-group + target: + - ipAddress: 
${yandex_compute_instance.my-instance-1.network_interface.0.ip_address} + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: my-subnet + - ipAddress: ${yandex_compute_instance.my-instance-2.network_interface.0.ip_address} + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: my-subnet diff --git a/examples-generated/alb/v1alpha1/virtualhost.yaml b/examples-generated/alb/v1alpha1/virtualhost.yaml new file mode 100644 index 0000000..b42a20d --- /dev/null +++ b/examples-generated/alb/v1alpha1/virtualhost.yaml @@ -0,0 +1,22 @@ +apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: VirtualHost +metadata: + annotations: + meta.upbound.io/example-id: alb/v1alpha1/virtualhost + labels: + testing.upbound.io/example-name: my-virtual-host + name: my-virtual-host +spec: + forProvider: + httpRouterIdSelector: + matchLabels: + testing.upbound.io/example-name: my-router + name: my-virtual-host + route: + - httpRoute: + - httpRouteAction: + - backendGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: my-bg + timeout: 3s + name: my-route diff --git a/examples-generated/api/v1alpha1/gateway.yaml b/examples-generated/api/v1alpha1/gateway.yaml new file mode 100644 index 0000000..16c0731 --- /dev/null +++ b/examples-generated/api/v1alpha1/gateway.yaml @@ -0,0 +1,69 @@ +apiVersion: api.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Gateway +metadata: + annotations: + meta.upbound.io/example-id: api/v1alpha1/gateway + labels: + testing.upbound.io/example-name: test-api-gateway + name: test-api-gateway +spec: + forProvider: + canary: + - variables: + installation: dev + weight: 20 + connectivity: + - networkId: + customDomains: + - certificateId: + fqdn: test.example.com + description: any description + executionTimeout: "300" + labels: + empty-label: "" + label: label + logOptions: + - logGroupId: + minLevel: ERROR + name: some_name + spec: | + openapi: "3.0.0" + info: + version: 1.0.0 + title: Test API + x-yc-apigateway: + variables: + installation: + default: "prod" + enum: + - "prod" + - "dev" + paths: + /hello: + get: + summary: Say hello + operationId: hello + parameters: + - name: user + in: query + description: User name to appear in greetings + required: false + schema: + type: string + default: 'world' + responses: + '200': + description: Greeting + content: + 'text/plain': + schema: + type: "string" + x-yc-apigateway-integration: + type: dummy + http_code: 200 + http_headers: + 'Content-Type': "text/plain" + content: + 'text/plain': "Hello again, {user} from ${apigw.installation} release!\n" + variables: + installation: prod diff --git a/examples-generated/audit/v1alpha1/trailstrail.yaml b/examples-generated/audit/v1alpha1/trailstrail.yaml new file mode 100644 index 0000000..ee0922f --- /dev/null +++ b/examples-generated/audit/v1alpha1/trailstrail.yaml @@ -0,0 +1,36 @@ +apiVersion: audit.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: TrailsTrail +metadata: + annotations: + meta.upbound.io/example-id: audit/v1alpha1/trailstrail + labels: + testing.upbound.io/example-name: basic_trail + name: basic-trail +spec: + forProvider: + description: Some trail description + filteringPolicy: + - dataEventsFilter: + - resourceScope: + - resourceId: home-folder + resourceType: resource-manager.folder + service: storage + - resourceScope: + - resourceId: vpc-net-id-1 + resourceType: vpc.network + - resourceId: vpc-net-id-2 + resourceType: vpc.network + service: dns + managementEventsFilter: + - resourceScope: + - resourceId: home-folder + resourceType: 
resource-manager.folder + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + labels: + key: value + loggingDestination: + - logGroupId: some-log-group + name: a-trail + serviceAccountId: trail-service-account diff --git a/examples-generated/backup/v1alpha1/policy.yaml b/examples-generated/backup/v1alpha1/policy.yaml new file mode 100644 index 0000000..2f4e54e --- /dev/null +++ b/examples-generated/backup/v1alpha1/policy.yaml @@ -0,0 +1,21 @@ +apiVersion: backup.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policy + labels: + testing.upbound.io/example-name: basic_policy + name: basic-policy +spec: + forProvider: + name: basic policy + reattempts: + - {} + retention: + - afterBackup: false + scheduling: + - backupSets: + - executeByInterval: 86400 + enabled: false + vmSnapshotReattempts: + - {} diff --git a/examples-generated/backup/v1alpha1/policybindings.yaml b/examples-generated/backup/v1alpha1/policybindings.yaml new file mode 100644 index 0000000..7f127ae --- /dev/null +++ b/examples-generated/backup/v1alpha1/policybindings.yaml @@ -0,0 +1,164 @@ +apiVersion: backup.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PolicyBindings +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: test_backup_binding + name: test-backup-binding +spec: + forProvider: + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: test_backup_compute + policyId: ${data.yandex_backup_policy.test_backup_policy.id} + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: test_backup_compute + name: test-backup-compute +spec: + forProvider: + bootDisk: + - initializeParams: + - imageIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_compute_image + metadata: + user-data: | + #cloud-config + packages: + - curl + - perl + - jq + runcmd: + - curl https://storage.yandexcloud.net/backup-distributions/agent_installer.sh | sudo bash + name: test-backup-compute + networkInterface: + - nat: true + securityGroupIdsRefs: + - name: test_backup_security_group + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: test_backup_subnet + platformId: standard-v1 + resources: + - cores: 2 + memory: 4 + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: test_sa + zone: ru-central1-a + +--- + +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccount +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: test_sa + name: test-sa +spec: + forProvider: {} + +--- + +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMMember +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: test_binding + name: test-binding +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: test_sa + member: serviceAccount:${yandex_iam_service_account.test_sa.id} + role: backup.editor + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: 
test_backup_network + name: test-backup-network +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: test_backup_security_group + name: test-backup-security-group +spec: + forProvider: + egress: + - fromPort: 7770 + protocol: TCP + toPort: 7800 + v4CidrBlocks: + - 84.47.172.0/24 + - port: 443 + protocol: TCP + v4CidrBlocks: + - 213.180.204.0/24 + - 213.180.193.0/24 + - 178.176.128.0/24 + - 84.201.181.0/24 + - 84.47.172.0/24 + - port: 80 + protocol: TCP + v4CidrBlocks: + - 213.180.204.0/24 + - 213.180.193.0/24 + - port: 8443 + protocol: TCP + v4CidrBlocks: + - 84.47.172.0/24 + - port: 44445 + protocol: TCP + v4CidrBlocks: + - 51.250.1.0/24 + name: cloud-backup + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: test_backup_network + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policybindings + labels: + testing.upbound.io/example-name: test_backup_subnet + name: test-backup-subnet +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: test_backup_network + v4CidrBlocks: + - 192.168.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/billing/v1alpha1/cloudbinding.yaml b/examples-generated/billing/v1alpha1/cloudbinding.yaml new file mode 100644 index 0000000..aa9b330 --- /dev/null +++ b/examples-generated/billing/v1alpha1/cloudbinding.yaml @@ -0,0 +1,12 @@ +apiVersion: billing.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CloudBinding +metadata: + annotations: + meta.upbound.io/example-id: billing/v1alpha1/cloudbinding + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + billingAccountId: foo-ba-id + cloudId: foo-cloud-id diff --git a/examples-generated/cdn/v1alpha1/origingroup.yaml b/examples-generated/cdn/v1alpha1/origingroup.yaml new file mode 100644 index 0000000..3843787 --- /dev/null +++ b/examples-generated/cdn/v1alpha1/origingroup.yaml @@ -0,0 +1,18 @@ +apiVersion: cdn.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: OriginGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1alpha1/origingroup + labels: + testing.upbound.io/example-name: my_group + name: my-group +spec: + forProvider: + name: My Origin group + origin: + - source: ya.ru + - source: yandex.ru + - source: goo.gl + - backup: false + source: amazon.com + useNext: true diff --git a/examples-generated/cdn/v1alpha1/resource.yaml b/examples-generated/cdn/v1alpha1/resource.yaml new file mode 100644 index 0000000..2a10e41 --- /dev/null +++ b/examples-generated/cdn/v1alpha1/resource.yaml @@ -0,0 +1,26 @@ +apiVersion: cdn.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Resource +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1alpha1/resource + labels: + testing.upbound.io/example-name: my_resource + name: my-resource +spec: + forProvider: + active: false + cname: cdn1.yandex-example.ru + options: + - edgeCacheSettings: 345600 + ignoreCookie: true + staticRequestHeaders: + is-from-cdn: "yes" + staticResponseHeaders: + is-cdn: "yes" + originGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: foo_cdn_group_by_id + originProtocol: https + secondaryHostnames: + - cdn-example-1.yandex.ru + - cdn-example-2.yandex.ru diff --git a/examples-generated/cm/v1alpha1/certificate.yaml 
b/examples-generated/cm/v1alpha1/certificate.yaml new file mode 100644 index 0000000..0261027 --- /dev/null +++ b/examples-generated/cm/v1alpha1/certificate.yaml @@ -0,0 +1,15 @@ +apiVersion: cm.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Certificate +metadata: + annotations: + meta.upbound.io/example-id: cm/v1alpha1/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domains: + - example.com + managed: + - challengeType: DNS_CNAME + name: example diff --git a/examples-generated/compute/v1alpha1/disk.yaml b/examples-generated/compute/v1alpha1/disk.yaml new file mode 100644 index 0000000..18d999c --- /dev/null +++ b/examples-generated/compute/v1alpha1/disk.yaml @@ -0,0 +1,18 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Disk +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/disk + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + imageIdSelector: + matchLabels: + testing.upbound.io/example-name: example + labels: + environment: test + name: disk-name + type: network-ssd + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/diskiambinding.yaml b/examples-generated/compute/v1alpha1/diskiambinding.yaml new file mode 100644 index 0000000..4eecd42 --- /dev/null +++ b/examples-generated/compute/v1alpha1/diskiambinding.yaml @@ -0,0 +1,35 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + diskId: ${data.yandex_compute_disk.disk1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Disk +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskiambinding + labels: + testing.upbound.io/example-name: disk1 + name: disk1 +spec: + forProvider: + imageIdSelector: + matchLabels: + testing.upbound.io/example-name: example + labels: + environment: test + name: disk-name + type: network-ssd + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/diskplacementgroup.yaml b/examples-generated/compute/v1alpha1/diskplacementgroup.yaml new file mode 100644 index 0000000..afa9d87 --- /dev/null +++ b/examples-generated/compute/v1alpha1/diskplacementgroup.yaml @@ -0,0 +1,15 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskPlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskplacementgroup + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + description: my description + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test-pg diff --git a/examples-generated/compute/v1alpha1/diskplacementgroupiambinding.yaml b/examples-generated/compute/v1alpha1/diskplacementgroupiambinding.yaml new file mode 100644 index 0000000..fe2c29b --- /dev/null +++ b/examples-generated/compute/v1alpha1/diskplacementgroupiambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskPlacementGroupIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskplacementgroupiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + diskPlacementGroupId: ${data.yandex_compute_disk_placement_group.group1.id} + 
members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskPlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskplacementgroupiambinding + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + description: my description + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test-pg diff --git a/examples-generated/compute/v1alpha1/filesystem.yaml b/examples-generated/compute/v1alpha1/filesystem.yaml new file mode 100644 index 0000000..bd5449d --- /dev/null +++ b/examples-generated/compute/v1alpha1/filesystem.yaml @@ -0,0 +1,16 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Filesystem +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/filesystem + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + labels: + environment: test + name: fs-name + size: 10 + type: network-ssd + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/filesystemiambinding.yaml b/examples-generated/compute/v1alpha1/filesystemiambinding.yaml new file mode 100644 index 0000000..215be1c --- /dev/null +++ b/examples-generated/compute/v1alpha1/filesystemiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FilesystemIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/filesystemiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + filesystemId: ${data.yandex_compute_filesystem.fs1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Filesystem +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/filesystemiambinding + labels: + testing.upbound.io/example-name: fs1 + name: fs1 +spec: + forProvider: + labels: + environment: test + name: fs-name + size: 10 + type: network-ssd + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/gpucluster.yaml b/examples-generated/compute/v1alpha1/gpucluster.yaml new file mode 100644 index 0000000..7f69dfc --- /dev/null +++ b/examples-generated/compute/v1alpha1/gpucluster.yaml @@ -0,0 +1,15 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GpuCluster +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/gpucluster + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + interconnectType: infiniband + labels: + environment: test + name: gpu-cluster-name + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/gpuclusteriambinding.yaml b/examples-generated/compute/v1alpha1/gpuclusteriambinding.yaml new file mode 100644 index 0000000..51df5b7 --- /dev/null +++ b/examples-generated/compute/v1alpha1/gpuclusteriambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GpuClusterIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/gpuclusteriambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + gpuClusterId: ${data.yandex_compute_gpu_cluster.cluster1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GpuCluster +metadata: + annotations: + meta.upbound.io/example-id: 
compute/v1alpha1/gpuclusteriambinding + labels: + testing.upbound.io/example-name: cluster1 + name: cluster1 +spec: + forProvider: + interconnectType: infiniband + labels: + environment: test + name: gpu-cluster-name + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/image.yaml b/examples-generated/compute/v1alpha1/image.yaml new file mode 100644 index 0000000..c04a5d3 --- /dev/null +++ b/examples-generated/compute/v1alpha1/image.yaml @@ -0,0 +1,31 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Image +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/image + labels: + testing.upbound.io/example-name: foo-image + name: foo-image +spec: + forProvider: + name: my-custom-image + sourceUrl: https://storage.yandexcloud.net/lucky-images/kube-it.img + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/image + labels: + testing.upbound.io/example-name: vm + name: vm +spec: + forProvider: + bootDisk: + - initializeParams: + - imageIdSelector: + matchLabels: + testing.upbound.io/example-name: foo-image + name: vm-from-custom-image diff --git a/examples-generated/compute/v1alpha1/imageiambinding.yaml b/examples-generated/compute/v1alpha1/imageiambinding.yaml new file mode 100644 index 0000000..57283df --- /dev/null +++ b/examples-generated/compute/v1alpha1/imageiambinding.yaml @@ -0,0 +1,29 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ImageIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/imageiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + imageId: ${data.yandex_compute_image.image1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Image +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/imageiambinding + labels: + testing.upbound.io/example-name: image1 + name: image1 +spec: + forProvider: + name: my-custom-image + sourceUrl: https://storage.yandexcloud.net/lucky-images/kube-it.img diff --git a/examples-generated/compute/v1alpha1/instance.yaml b/examples-generated/compute/v1alpha1/instance.yaml new file mode 100644 index 0000000..5555e92 --- /dev/null +++ b/examples-generated/compute/v1alpha1/instance.yaml @@ -0,0 +1,60 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instance + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + bootDisk: + - diskIdSelector: + matchLabels: + testing.upbound.io/example-name: boot-disk + metadata: + foo: bar + ssh-keys: ubuntu:${file("~/.ssh/id_rsa.pub")} + name: test + networkInterface: + - index: 1 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + platformId: standard-v1 + resources: + - cores: 2 + memory: 4 + zone: ru-central1-a + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instance + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instance + labels: + testing.upbound.io/example-name: foo + name: foo +spec: 
+ forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/instancegroup.yaml b/examples-generated/compute/v1alpha1/instancegroup.yaml new file mode 100644 index 0000000..f980911 --- /dev/null +++ b/examples-generated/compute/v1alpha1/instancegroup.yaml @@ -0,0 +1,58 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: InstanceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instancegroup + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + allocationPolicy: + - zones: + - ru-central1-a + deletionProtection: true + deployPolicy: + - maxCreating: 2 + maxDeleting: 2 + maxExpansion: 2 + maxUnavailable: 2 + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + instanceTemplate: + - bootDisk: + - initializeParams: + - imageIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_compute_image + size: 4 + mode: READ_WRITE + labels: + label1: label1-value + label2: label2-value + metadata: + foo: bar + ssh-keys: ubuntu:${file("~/.ssh/id_rsa.pub")} + networkInterface: + - networkIdSelector: + matchLabels: + testing.upbound.io/example-name: my-inst-group-network + subnetIdsRefs: + - name: my-inst-group-subnet + networkSettings: + - type: STANDARD + platformId: standard-v1 + resources: + - cores: 2 + memory: 2 + name: test-ig + scalePolicy: + - fixedScale: + - size: 3 + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: test_account + variables: + test_key1: test_value1 + test_key2: test_value2 diff --git a/examples-generated/compute/v1alpha1/instanceiambinding.yaml b/examples-generated/compute/v1alpha1/instanceiambinding.yaml new file mode 100644 index 0000000..21c5ce0 --- /dev/null +++ b/examples-generated/compute/v1alpha1/instanceiambinding.yaml @@ -0,0 +1,77 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: InstanceIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + instanceId: ${data.yandex_compute_instance.instance1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: instance1 + name: instance1 +spec: + forProvider: + bootDisk: + - diskIdSelector: + matchLabels: + testing.upbound.io/example-name: boot-disk + metadata: + foo: bar + ssh-keys: ubuntu:${file("~/.ssh/id_rsa.pub")} + name: test + networkInterface: + - index: 1 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + platformId: standard-v1 + resources: + - cores: 2 + memory: 4 + zone: ru-central1-a + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + 
networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/placementgroup.yaml b/examples-generated/compute/v1alpha1/placementgroup.yaml new file mode 100644 index 0000000..f0cfac4 --- /dev/null +++ b/examples-generated/compute/v1alpha1/placementgroup.yaml @@ -0,0 +1,15 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/placementgroup + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + description: my description + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test-pg diff --git a/examples-generated/compute/v1alpha1/placementgroupiambinding.yaml b/examples-generated/compute/v1alpha1/placementgroupiambinding.yaml new file mode 100644 index 0000000..e94f74a --- /dev/null +++ b/examples-generated/compute/v1alpha1/placementgroupiambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PlacementGroupIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/placementgroupiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + placementGroupId: ${data.yandex_compute_placement_group.pg1.id} + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/placementgroupiambinding + labels: + testing.upbound.io/example-name: pg1 + name: pg1 +spec: + forProvider: + description: my description + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test-pg diff --git a/examples-generated/compute/v1alpha1/snapshot.yaml b/examples-generated/compute/v1alpha1/snapshot.yaml new file mode 100644 index 0000000..35b36db --- /dev/null +++ b/examples-generated/compute/v1alpha1/snapshot.yaml @@ -0,0 +1,16 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Snapshot +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshot + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + labels: + my-label: my-label-value + name: test-snapshot + sourceDiskIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1alpha1/snapshotiambinding.yaml b/examples-generated/compute/v1alpha1/snapshotiambinding.yaml new file mode 100644 index 0000000..2810255 --- /dev/null +++ b/examples-generated/compute/v1alpha1/snapshotiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + role: editor + snapshotId: ${data.yandex_compute_snapshot.snapshot1.id} + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Snapshot +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotiambinding + labels: + testing.upbound.io/example-name: snapshot1 + name: snapshot1 +spec: + forProvider: + labels: + my-label: my-label-value + name: test-snapshot + sourceDiskIdSelector: + matchLabels: + 
testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1alpha1/snapshotschedule.yaml b/examples-generated/compute/v1alpha1/snapshotschedule.yaml new file mode 100644 index 0000000..06e1049 --- /dev/null +++ b/examples-generated/compute/v1alpha1/snapshotschedule.yaml @@ -0,0 +1,23 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotSchedule +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotschedule + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + diskIdsRefs: + - name: example + - name: example + labels: + my-label: my-label-value + name: my-name + schedulePolicy: + - expression: 0 0 * * * + snapshotCount: 1 + snapshotSpec: + - description: snapshot-description + labels: + snapshot-label: my-snapshot-label-value diff --git a/examples-generated/compute/v1alpha1/snapshotscheduleiambinding.yaml b/examples-generated/compute/v1alpha1/snapshotscheduleiambinding.yaml new file mode 100644 index 0000000..2c3d6ed --- /dev/null +++ b/examples-generated/compute/v1alpha1/snapshotscheduleiambinding.yaml @@ -0,0 +1,35 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotScheduleIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotscheduleiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + role: editor + snapshotScheduleId: ${data.yandex_compute_snapshot_schedule.schedule1.id} + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotSchedule +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotscheduleiambinding + labels: + testing.upbound.io/example-name: schedule1 + name: schedule1 +spec: + forProvider: + diskIdsRefs: + - name: example + - name: example + retentionPeriod: 12h + schedulePolicy: + - expression: 0 0 * * * + snapshotSpec: + - description: retention-snapshot diff --git a/examples-generated/container/v1alpha1/registry.yaml b/examples-generated/container/v1alpha1/registry.yaml new file mode 100644 index 0000000..40d7568 --- /dev/null +++ b/examples-generated/container/v1alpha1/registry.yaml @@ -0,0 +1,16 @@ +apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Registry +metadata: + annotations: + meta.upbound.io/example-id: container/v1alpha1/registry + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + labels: + my-label: my-label-value + name: test-registry diff --git a/examples-generated/container/v1alpha1/registryiambinding.yaml b/examples-generated/container/v1alpha1/registryiambinding.yaml new file mode 100644 index 0000000..05c0101 --- /dev/null +++ b/examples-generated/container/v1alpha1/registryiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: RegistryIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: container/v1alpha1/registryiambinding + labels: + testing.upbound.io/example-name: puller + name: puller +spec: + forProvider: + registryIdSelector: + matchLabels: + testing.upbound.io/example-name: your-registry + role: container-registry.images.puller + serviceAccountRef: + - name: example + +--- + +apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Registry +metadata: + annotations: + meta.upbound.io/example-id: 
container/v1alpha1/registryiambinding + labels: + testing.upbound.io/example-name: your-registry + name: your-registry +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: registry-name diff --git a/examples-generated/container/v1alpha1/registryippermission.yaml b/examples-generated/container/v1alpha1/registryippermission.yaml new file mode 100644 index 0000000..fd0ca2d --- /dev/null +++ b/examples-generated/container/v1alpha1/registryippermission.yaml @@ -0,0 +1,34 @@ +apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: RegistryIPPermission +metadata: + annotations: + meta.upbound.io/example-id: container/v1alpha1/registryippermission + labels: + testing.upbound.io/example-name: my_ip_permission + name: my-ip-permission +spec: + forProvider: + pull: + - 10.1.0.0/16 + - 10.5.0.0/16 + push: + - 10.1.0.0/16 + - 10.2.0.0/16 + - 10.3.0.0/16 + registryIdSelector: + matchLabels: + testing.upbound.io/example-name: my_registry + +--- + +apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Registry +metadata: + annotations: + meta.upbound.io/example-id: container/v1alpha1/registryippermission + labels: + testing.upbound.io/example-name: my_registry + name: my-registry +spec: + forProvider: + name: test-registry diff --git a/examples-generated/dataproc/v1alpha1/cluster.yaml b/examples-generated/dataproc/v1alpha1/cluster.yaml new file mode 100644 index 0000000..c7234e3 --- /dev/null +++ b/examples-generated/dataproc/v1alpha1/cluster.yaml @@ -0,0 +1,208 @@ +apiVersion: dataproc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: foo + clusterConfig: + - hadoop: + - initializationAction: + - args: + - arg1 + - arg2 + uri: s3a://yandex_storage_bucket.foo.bucket/scripts/script.sh + properties: + yarn:yarn.resourcemanager.am.max-attempts: 5 + services: + - HDFS + - YARN + - SPARK + - TEZ + - MAPREDUCE + - HIVE + sshPublicKeys: + - ${file("~/.ssh/id_rsa.pub")} + subclusterSpec: + - hostsCount: 1 + name: main + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: MASTERNODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + - hostsCount: 2 + name: data + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: DATANODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + - hostsCount: 2 + name: compute + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: COMPUTENODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + - autoscalingConfig: + - decommissionTimeout: 60 + maxHostsCount: 10 + measurementDuration: 60 + preemptible: false + stabilizationDuration: 120 + warmupDuration: 60 + hostsCount: 2 + name: compute_autoscaling + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: COMPUTENODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + description: Dataproc Cluster created by Terraform + labels: + created_by: terraform + name: dataproc-cluster + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: dataproc + zoneId: ru-central1-b + +--- + +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccount
+metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: dataproc + name: dataproc +spec: + forProvider: + description: service account to manage Dataproc Cluster + +--- + +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountStaticAccessKey +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: dataproc + +--- + +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: bucket-creator + name: bucket-creator +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + members: + - serviceAccount:${yandex_iam_service_account.dataproc.id} + role: editor + +--- + +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: dataproc + name: dataproc +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + members: + - serviceAccount:${yandex_iam_service_account.dataproc.id} + role: mdb.dataproc.agent + +--- + +apiVersion: storage.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Bucket +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + accessKeySelector: + matchLabels: + testing.upbound.io/example-name: foo + bucket: foo + secretKeySecretRef: + key: attribute.secret_key + name: example-iam-service-account-static-access-key + namespace: upbound-system + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.1.0.0/24 + zone: ru-central1-b diff --git a/examples-generated/dns/v1alpha1/recordset.yaml b/examples-generated/dns/v1alpha1/recordset.yaml new file mode 100644 index 0000000..d93de00 --- /dev/null +++ b/examples-generated/dns/v1alpha1/recordset.yaml @@ -0,0 +1,52 @@ +apiVersion: dns.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Recordset +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/recordset + labels: + testing.upbound.io/example-name: rs1 + name: rs1 +spec: + forProvider: + data: + - 10.1.0.1 + name: srv.example.com. 
+ ttl: 200 + type: A + zoneIdSelector: + matchLabels: + testing.upbound.io/example-name: zone1 + +--- + +apiVersion: dns.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Zone +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/recordset + labels: + testing.upbound.io/example-name: zone1 + name: zone1 +spec: + forProvider: + description: desc + labels: + label1: label-1-value + name: my_private_zone + privateNetworksRefs: + - name: foo + public: false + zone: example.com. + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/recordset + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} diff --git a/examples-generated/dns/v1alpha1/zone.yaml b/examples-generated/dns/v1alpha1/zone.yaml new file mode 100644 index 0000000..c6ff748 --- /dev/null +++ b/examples-generated/dns/v1alpha1/zone.yaml @@ -0,0 +1,53 @@ +apiVersion: dns.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Zone +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/zone + labels: + testing.upbound.io/example-name: zone1 + name: zone1 +spec: + forProvider: + deletionProtection: true + description: desc + labels: + label1: label-1-value + name: my-private-zone + privateNetworksRefs: + - name: foo + public: false + zone: example.com. + +--- + +apiVersion: dns.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Recordset +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/zone + labels: + testing.upbound.io/example-name: rs1 + name: rs1 +spec: + forProvider: + data: + - 10.1.0.1 + name: srv.example.com. + ttl: 200 + type: A + zoneIdSelector: + matchLabels: + testing.upbound.io/example-name: zone1 + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/zone + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} diff --git a/examples-generated/dns/v1alpha1/zoneiambinding.yaml b/examples-generated/dns/v1alpha1/zoneiambinding.yaml new file mode 100644 index 0000000..ccfcad3 --- /dev/null +++ b/examples-generated/dns/v1alpha1/zoneiambinding.yaml @@ -0,0 +1,31 @@ +apiVersion: dns.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ZoneIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/zoneiambinding + labels: + testing.upbound.io/example-name: viewer + name: viewer +spec: + forProvider: + dnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: zone1 + members: + - userAccount:foo_user_id + role: dns.viewer + +--- + +apiVersion: dns.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Zone +metadata: + annotations: + meta.upbound.io/example-id: dns/v1alpha1/zoneiambinding + labels: + testing.upbound.io/example-name: zone1 + name: zone1 +spec: + forProvider: + name: my-private-zone + zone: example.com. 
diff --git a/examples-generated/function/v1alpha1/iambinding.yaml b/examples-generated/function/v1alpha1/iambinding.yaml new file mode 100644 index 0000000..27f464d --- /dev/null +++ b/examples-generated/function/v1alpha1/iambinding.yaml @@ -0,0 +1,14 @@ +apiVersion: function.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: IAMBinding +metadata: + annotations: + meta.upbound.io/example-id: function/v1alpha1/iambinding + labels: + testing.upbound.io/example-name: function-iam + name: function-iam +spec: + forProvider: + functionId: your-function-id + members: + - system:allUsers + role: serverless.functions.invoker diff --git a/examples-generated/function/v1alpha1/scalingpolicy.yaml b/examples-generated/function/v1alpha1/scalingpolicy.yaml new file mode 100644 index 0000000..5628a07 --- /dev/null +++ b/examples-generated/function/v1alpha1/scalingpolicy.yaml @@ -0,0 +1,18 @@ +apiVersion: function.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ScalingPolicy +metadata: + annotations: + meta.upbound.io/example-id: function/v1alpha1/scalingpolicy + labels: + testing.upbound.io/example-name: my_scaling_policy + name: my-scaling-policy +spec: + forProvider: + functionId: are1samplefunction11 + policy: + - tag: $latest + zoneInstancesLimit: 3 + zoneRequestsLimit: 100 + - tag: my_tag + zoneInstancesLimit: 4 + zoneRequestsLimit: 150 diff --git a/examples-generated/function/v1alpha1/trigger.yaml b/examples-generated/function/v1alpha1/trigger.yaml new file mode 100644 index 0000000..c424f25 --- /dev/null +++ b/examples-generated/function/v1alpha1/trigger.yaml @@ -0,0 +1,16 @@ +apiVersion: function.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Trigger +metadata: + annotations: + meta.upbound.io/example-id: function/v1alpha1/trigger + labels: + testing.upbound.io/example-name: my_trigger + name: my-trigger +spec: + forProvider: + description: any description + function: + - id: tf-test + name: some_name + timer: + - cronExpression: '* * * * ? 
*' diff --git a/examples-generated/iam/v1alpha1/serviceaccount.yaml b/examples-generated/iam/v1alpha1/serviceaccount.yaml new file mode 100644 index 0000000..e58ee60 --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccount.yaml @@ -0,0 +1,11 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccount +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccount + labels: + testing.upbound.io/example-name: sa + name: sa +spec: + forProvider: + description: service account to manage VMs diff --git a/examples-generated/iam/v1alpha1/serviceaccountapikey.yaml b/examples-generated/iam/v1alpha1/serviceaccountapikey.yaml new file mode 100644 index 0000000..58571e2 --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccountapikey.yaml @@ -0,0 +1,17 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountAPIKey +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccountapikey + labels: + testing.upbound.io/example-name: sa-api-key + name: sa-api-key +spec: + forProvider: + description: api key for authorization + expiresAt: "2024-11-11T00:00:00Z" + pgpKey: keybase:keybaseusername + scope: yc.ydb.topics.manage + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/iam/v1alpha1/serviceaccountiambinding.yaml b/examples-generated/iam/v1alpha1/serviceaccountiambinding.yaml new file mode 100644 index 0000000..78b52b3 --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccountiambinding.yaml @@ -0,0 +1,16 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccountiambinding + labels: + testing.upbound.io/example-name: admin-account-iam + name: admin-account-iam +spec: + forProvider: + role: admin + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceAccountRef: + - name: example diff --git a/examples-generated/iam/v1alpha1/serviceaccountiammember.yaml b/examples-generated/iam/v1alpha1/serviceaccountiammember.yaml new file mode 100644 index 0000000..90475a2 --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccountiammember.yaml @@ -0,0 +1,17 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountIAMMember +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccountiammember + labels: + testing.upbound.io/example-name: admin-account-iam + name: admin-account-iam +spec: + forProvider: + role: admin + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceAccountSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/iam/v1alpha1/serviceaccountiampolicy.yaml b/examples-generated/iam/v1alpha1/serviceaccountiampolicy.yaml new file mode 100644 index 0000000..b350e4e --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccountiampolicy.yaml @@ -0,0 +1,14 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountIAMPolicy +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccountiampolicy + labels: + testing.upbound.io/example-name: admin-account-iam + name: admin-account-iam +spec: + forProvider: + policyData: ${data.yandex_iam_policy.admin.policy_data} + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git 
a/examples-generated/iam/v1alpha1/serviceaccountkey.yaml b/examples-generated/iam/v1alpha1/serviceaccountkey.yaml new file mode 100644 index 0000000..cac96bc --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccountkey.yaml @@ -0,0 +1,16 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountKey +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccountkey + labels: + testing.upbound.io/example-name: sa-auth-key + name: sa-auth-key +spec: + forProvider: + description: key for service account + keyAlgorithm: RSA_4096 + pgpKey: keybase:keybaseusername + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/iam/v1alpha1/serviceaccountstaticaccesskey.yaml b/examples-generated/iam/v1alpha1/serviceaccountstaticaccesskey.yaml new file mode 100644 index 0000000..b0f2f94 --- /dev/null +++ b/examples-generated/iam/v1alpha1/serviceaccountstaticaccesskey.yaml @@ -0,0 +1,15 @@ +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountStaticAccessKey +metadata: + annotations: + meta.upbound.io/example-id: iam/v1alpha1/serviceaccountstaticaccesskey + labels: + testing.upbound.io/example-name: sa-static-key + name: sa-static-key +spec: + forProvider: + description: static access key for object storage + pgpKey: keybase:keybaseusername + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/iot/v1alpha1/corebroker.yaml b/examples-generated/iot/v1alpha1/corebroker.yaml new file mode 100644 index 0000000..aea69ce --- /dev/null +++ b/examples-generated/iot/v1alpha1/corebroker.yaml @@ -0,0 +1,20 @@ +apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CoreBroker +metadata: + annotations: + meta.upbound.io/example-id: iot/v1alpha1/corebroker + labels: + testing.upbound.io/example-name: my_broker + name: my-broker +spec: + forProvider: + certificates: + - public part of certificate1 + - public part of certificate2 + description: any description + labels: + my-label: my-label-value + logOptions: + - logGroupId: log-group-id + minLevel: ERROR + name: some_name diff --git a/examples-generated/iot/v1alpha1/coredevice.yaml b/examples-generated/iot/v1alpha1/coredevice.yaml new file mode 100644 index 0000000..70443a6 --- /dev/null +++ b/examples-generated/iot/v1alpha1/coredevice.yaml @@ -0,0 +1,23 @@ +apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CoreDevice +metadata: + annotations: + meta.upbound.io/example-id: iot/v1alpha1/coredevice + labels: + testing.upbound.io/example-name: my_device + name: my-device +spec: + forProvider: + aliases: + some_alias1/subtopic: $devices/{id}/events/somesubtopic + some_alias2/subtopic: $devices/{id}/events/aaa/bbb + certificates: + - public part of certificate1 + - public part of certificate2 + description: any description + name: some_name + passwordsSecretRef: + - key: example-key + name: example-secret + namespace: upbound-system + registryId: are1sampleregistryid11 diff --git a/examples-generated/iot/v1alpha1/coreregistry.yaml b/examples-generated/iot/v1alpha1/coreregistry.yaml new file mode 100644 index 0000000..ee54ff9 --- /dev/null +++ b/examples-generated/iot/v1alpha1/coreregistry.yaml @@ -0,0 +1,24 @@ +apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CoreRegistry +metadata: + annotations: + meta.upbound.io/example-id: iot/v1alpha1/coreregistry + labels: + testing.upbound.io/example-name: my_registry + name: my-registry +spec: 
+ forProvider: + certificates: + - public part of certificate1 + - public part of certificate2 + description: any description + labels: + my-label: my-label-value + logOptions: + - logGroupId: log-group-id + minLevel: ERROR + name: some_name + passwordsSecretRef: + - key: example-key + name: example-secret + namespace: upbound-system diff --git a/examples-generated/kms/v1alpha1/asymmetricencryptionkey.yaml b/examples-generated/kms/v1alpha1/asymmetricencryptionkey.yaml new file mode 100644 index 0000000..a8415f6 --- /dev/null +++ b/examples-generated/kms/v1alpha1/asymmetricencryptionkey.yaml @@ -0,0 +1,13 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: AsymmetricEncryptionKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/asymmetricencryptionkey + labels: + testing.upbound.io/example-name: key-a + name: key-a +spec: + forProvider: + description: description for key + encryptionAlgorithm: RSA_2048_ENC_OAEP_SHA_256 + name: example-asymmetric-encryption-key diff --git a/examples-generated/kms/v1alpha1/asymmetricencryptionkeyiambinding.yaml b/examples-generated/kms/v1alpha1/asymmetricencryptionkeyiambinding.yaml new file mode 100644 index 0000000..b50a086 --- /dev/null +++ b/examples-generated/kms/v1alpha1/asymmetricencryptionkeyiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: AsymmetricEncryptionKeyIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/asymmetricencryptionkeyiambinding + labels: + testing.upbound.io/example-name: viewer + name: viewer +spec: + forProvider: + asymmetricEncryptionKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: your-key + role: viewer + serviceAccountRef: + - name: example + +--- + +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: AsymmetricEncryptionKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/asymmetricencryptionkeyiambinding + labels: + testing.upbound.io/example-name: your-key + name: your-key +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: asymmetric-encryption-key-name diff --git a/examples-generated/kms/v1alpha1/asymmetricsignaturekey.yaml b/examples-generated/kms/v1alpha1/asymmetricsignaturekey.yaml new file mode 100644 index 0000000..d1f22c1 --- /dev/null +++ b/examples-generated/kms/v1alpha1/asymmetricsignaturekey.yaml @@ -0,0 +1,13 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: AsymmetricSignatureKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/asymmetricsignaturekey + labels: + testing.upbound.io/example-name: key-a + name: key-a +spec: + forProvider: + description: description for key + name: example-asymmetric-signature-key + signatureAlgorithm: RSA_2048_SIGN_PSS_SHA_256 diff --git a/examples-generated/kms/v1alpha1/asymmetricsignaturekeyiambinding.yaml b/examples-generated/kms/v1alpha1/asymmetricsignaturekeyiambinding.yaml new file mode 100644 index 0000000..e1821e9 --- /dev/null +++ b/examples-generated/kms/v1alpha1/asymmetricsignaturekeyiambinding.yaml @@ -0,0 +1,31 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: AsymmetricSignatureKeyIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/asymmetricsignaturekeyiambinding + labels: + testing.upbound.io/example-name: viewer + name: viewer +spec: + forProvider: + asymmetricSignatureKeyId: ${yandex_kms_asymmetric_signature_key.your-key.id} + role: viewer + serviceAccountRef:
+ - name: example + +--- + +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: AsymmetricSignatureKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/asymmetricsignaturekeyiambinding + labels: + testing.upbound.io/example-name: your-key + name: your-key +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: asymmetric-signature-key-name diff --git a/examples-generated/kms/v1alpha1/secretciphertext.yaml b/examples-generated/kms/v1alpha1/secretciphertext.yaml new file mode 100644 index 0000000..bb8af1c --- /dev/null +++ b/examples-generated/kms/v1alpha1/secretciphertext.yaml @@ -0,0 +1,33 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecretCiphertext +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/secretciphertext + labels: + testing.upbound.io/example-name: password + name: password +spec: + forProvider: + aadContext: additional authenticated data + keyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + plaintextSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + +--- + +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SymmetricKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/secretciphertext + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: description for key + name: example-symmetric-key diff --git a/examples-generated/kms/v1alpha1/symmetrickey.yaml b/examples-generated/kms/v1alpha1/symmetrickey.yaml new file mode 100644 index 0000000..66bea35 --- /dev/null +++ b/examples-generated/kms/v1alpha1/symmetrickey.yaml @@ -0,0 +1,14 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SymmetricKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/symmetrickey + labels: + testing.upbound.io/example-name: key-a + name: key-a +spec: + forProvider: + defaultAlgorithm: AES_128 + description: description for key + name: example-symmetric-key + rotationPeriod: 8760h diff --git a/examples-generated/kms/v1alpha1/symmetrickeyiambinding.yaml b/examples-generated/kms/v1alpha1/symmetrickeyiambinding.yaml new file mode 100644 index 0000000..b572e02 --- /dev/null +++ b/examples-generated/kms/v1alpha1/symmetrickeyiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SymmetricKeyIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/symmetrickeyiambinding + labels: + testing.upbound.io/example-name: viewer + name: viewer +spec: + forProvider: + role: viewer + serviceAccountRef: + - name: example + symmetricKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: your-key + +--- + +apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SymmetricKey +metadata: + annotations: + meta.upbound.io/example-id: kms/v1alpha1/symmetrickeyiambinding + labels: + testing.upbound.io/example-name: your-key + name: your-key +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: symmetric-key-name diff --git a/examples-generated/kubernetes/v1alpha1/cluster.yaml b/examples-generated/kubernetes/v1alpha1/cluster.yaml new file mode 100644 index 0000000..3e7154c --- /dev/null +++ b/examples-generated/kubernetes/v1alpha1/cluster.yaml @@ -0,0 +1,56 @@ +apiVersion: kubernetes.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id:
kubernetes/v1alpha1/cluster + labels: + testing.upbound.io/example-name: zonal_cluster_resource_name + name: zonal-cluster-resource-name +spec: + forProvider: + description: description + kmsProvider: + - keyIdSelector: + matchLabels: + testing.upbound.io/example-name: kms_key_resource_name + labels: + my_key: my_value + my_other_key: my_other_value + master: + - maintenancePolicy: + - autoUpgrade: true + maintenanceWindow: + - duration: 3h + startTime: "15:00" + masterLogging: + - auditEnabled: true + clusterAutoscalerEnabled: true + enabled: true + eventsEnabled: true + kubeApiserverEnabled: true + logGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: log_group_resource_name + publicIp: true + securityGroupIdsRefs: + - name: security_group_name + version: "1.17" + zonal: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: subnet_resource_name + zoneSelector: + matchLabels: + testing.upbound.io/example-name: subnet_resource_name + name: name + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: network_resource_name + networkPolicyProvider: CALICO + nodeServiceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: node_service_account_resource_name + releaseChannel: RAPID + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: service_account_resource_name diff --git a/examples-generated/kubernetes/v1alpha1/nodegroup.yaml b/examples-generated/kubernetes/v1alpha1/nodegroup.yaml new file mode 100644 index 0000000..01abe60 --- /dev/null +++ b/examples-generated/kubernetes/v1alpha1/nodegroup.yaml @@ -0,0 +1,50 @@ +apiVersion: kubernetes.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: NodeGroup +metadata: + annotations: + meta.upbound.io/example-id: kubernetes/v1alpha1/nodegroup + labels: + testing.upbound.io/example-name: my_node_group + name: my-node-group +spec: + forProvider: + allocationPolicy: + - location: + - zone: ru-central1-a + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: my_cluster + description: description + instanceTemplate: + - bootDisk: + - size: 64 + type: network-hdd + containerRuntime: + - type: containerd + networkInterface: + - nat: true + subnetIdsRefs: + - name: my_subnet + platformId: standard-v2 + resources: + - cores: 2 + memory: 2 + schedulingPolicy: + - preemptible: false + labels: + key: value + maintenancePolicy: + - autoRepair: true + autoUpgrade: true + maintenanceWindow: + - day: monday + duration: 3h + startTime: "15:00" + - day: friday + duration: 4h30m + startTime: "10:00" + name: name + scalePolicy: + - fixedScale: + - size: 1 + version: "1.17" diff --git a/examples-generated/lb/v1alpha1/networkloadbalancer.yaml b/examples-generated/lb/v1alpha1/networkloadbalancer.yaml new file mode 100644 index 0000000..c69d863 --- /dev/null +++ b/examples-generated/lb/v1alpha1/networkloadbalancer.yaml @@ -0,0 +1,25 @@ +apiVersion: lb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: NetworkLoadBalancer +metadata: + annotations: + meta.upbound.io/example-id: lb/v1alpha1/networkloadbalancer + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + attachedTargetGroup: + - healthcheck: + - httpOptions: + - path: /ping + port: 8080 + name: http + targetGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: my-target-group + listener: + - externalAddressSpec: + - ipVersion: ipv4 + name: my-listener + port: 8080 + name: my-network-load-balancer diff --git a/examples-generated/lb/v1alpha1/targetgroup.yaml
b/examples-generated/lb/v1alpha1/targetgroup.yaml new file mode 100644 index 0000000..340805b --- /dev/null +++ b/examples-generated/lb/v1alpha1/targetgroup.yaml @@ -0,0 +1,21 @@ +apiVersion: lb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: TargetGroup +metadata: + annotations: + meta.upbound.io/example-id: lb/v1alpha1/targetgroup + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + name: my-target-group + regionId: ru-central1 + target: + - address: ${yandex_compute_instance.my-instance-1.network_interface.0.ip_address} + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: my-subnet + - address: ${yandex_compute_instance.my-instance-2.network_interface.0.ip_address} + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: my-subnet diff --git a/examples-generated/loadtesting/v1alpha1/agent.yaml b/examples-generated/loadtesting/v1alpha1/agent.yaml new file mode 100644 index 0000000..587b13e --- /dev/null +++ b/examples-generated/loadtesting/v1alpha1/agent.yaml @@ -0,0 +1,33 @@ +apiVersion: loadtesting.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Agent +metadata: + annotations: + meta.upbound.io/example-id: loadtesting/v1alpha1/agent + labels: + testing.upbound.io/example-name: my-agent + name: my-agent +spec: + forProvider: + computeInstance: + - bootDisk: + - autoDelete: true + initializeParams: + - size: 15 + networkInterface: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: my-subnet-a + resources: + - cores: 2 + memory: 4 + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: test_account + zoneId: ru-central1-b + description: 2 core 4 GB RAM agent + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + labels: + jmeter: "5" + name: my-agent diff --git a/examples-generated/lockbox/v1alpha1/secret.yaml b/examples-generated/lockbox/v1alpha1/secret.yaml new file mode 100644 index 0000000..6323e0f --- /dev/null +++ b/examples-generated/lockbox/v1alpha1/secret.yaml @@ -0,0 +1,11 @@ +apiVersion: lockbox.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Secret +metadata: + annotations: + meta.upbound.io/example-id: lockbox/v1alpha1/secret + labels: + testing.upbound.io/example-name: my_secret + name: my-secret +spec: + forProvider: + name: test secret diff --git a/examples-generated/lockbox/v1alpha1/secretiambinding.yaml b/examples-generated/lockbox/v1alpha1/secretiambinding.yaml new file mode 100644 index 0000000..070258a --- /dev/null +++ b/examples-generated/lockbox/v1alpha1/secretiambinding.yaml @@ -0,0 +1,30 @@ +apiVersion: lockbox.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecretIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: lockbox/v1alpha1/secretiambinding + labels: + testing.upbound.io/example-name: viewer + name: viewer +spec: + forProvider: + role: viewer + secretIdSelector: + matchLabels: + testing.upbound.io/example-name: your-secret + serviceAccountRef: + - name: example + +--- + +apiVersion: lockbox.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Secret +metadata: + annotations: + meta.upbound.io/example-id: lockbox/v1alpha1/secretiambinding + labels: + testing.upbound.io/example-name: your-secret + name: your-secret +spec: + forProvider: + name: secret-name diff --git a/examples-generated/lockbox/v1alpha1/secretversion.yaml b/examples-generated/lockbox/v1alpha1/secretversion.yaml new file mode 100644 index 0000000..50a2e0d --- /dev/null +++ 
b/examples-generated/lockbox/v1alpha1/secretversion.yaml @@ -0,0 +1,36 @@ +apiVersion: lockbox.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecretVersion +metadata: + annotations: + meta.upbound.io/example-id: lockbox/v1alpha1/secretversion + labels: + testing.upbound.io/example-name: my_version + name: my-version +spec: + forProvider: + entries: + - key: key1 + textValueSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + - command: + - path: my_secret_generator.sh + key: k2 + secretIdSelector: + matchLabels: + testing.upbound.io/example-name: my_secret + +--- + +apiVersion: lockbox.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Secret +metadata: + annotations: + meta.upbound.io/example-id: lockbox/v1alpha1/secretversion + labels: + testing.upbound.io/example-name: my_secret + name: my-secret +spec: + forProvider: + name: test secret diff --git a/examples-generated/logging/v1alpha1/group.yaml b/examples-generated/logging/v1alpha1/group.yaml new file mode 100644 index 0000000..a68d41b --- /dev/null +++ b/examples-generated/logging/v1alpha1/group.yaml @@ -0,0 +1,14 @@ +apiVersion: logging.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: logging/v1alpha1/group + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + name: test-logging-group diff --git a/examples-generated/mdb/v1alpha1/clickhousecluster.yaml b/examples-generated/mdb/v1alpha1/clickhousecluster.yaml new file mode 100644 index 0000000..df560b6 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/clickhousecluster.yaml @@ -0,0 +1,182 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ClickhouseCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/clickhousecluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clickhouse: + - config: + - backgroundPoolSize: 16 + backgroundSchedulePoolSize: 16 + compression: + - method: LZ4 + minPartSize: 1024 + minPartSizeRatio: 0.5 + - method: ZSTD + minPartSize: 2048 + minPartSizeRatio: 0.7 + geobaseUri: "" + graphiteRollup: + - name: rollup1 + pattern: + - function: func1 + regexp: abc + retention: + - age: 1000 + precision: 3 + - name: rollup2 + pattern: + - function: func2 + retention: + - age: 2000 + precision: 5 + kafka: + - saslMechanism: SASL_MECHANISM_GSSAPI + saslPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + saslUsername: user1 + securityProtocol: SECURITY_PROTOCOL_PLAINTEXT + kafkaTopic: + - name: topic1 + settings: + - saslMechanism: SASL_MECHANISM_SCRAM_SHA_256 + saslPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + saslUsername: user2 + securityProtocol: SECURITY_PROTOCOL_SSL + - name: topic2 + settings: + - saslMechanism: SASL_MECHANISM_PLAIN + securityProtocol: SECURITY_PROTOCOL_SASL_PLAINTEXT + keepAliveTimeout: 3000 + logLevel: TRACE + markCacheSize: 5368709120 + maxConcurrentQueries: 50 + maxConnections: 100 + maxPartitionSizeToDrop: 53687091200 + maxTableSizeToDrop: 53687091200 + mergeTree: + - maxBytesToMergeAtMaxSpaceInPool: 161061273600 + maxBytesToMergeAtMinSpaceInPool: 1048576 + maxReplicatedMergesInQueue: 16 + numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 8 + partsToDelayInsert: 150 + partsToThrowInsert: 300 + replicatedDeduplicationWindow: 100 + 
replicatedDeduplicationWindowSeconds: 604800 + metricLogEnabled: true + metricLogRetentionSize: 536870912 + metricLogRetentionTime: 2592000 + partLogRetentionSize: 536870912 + partLogRetentionTime: 2592000 + queryLogRetentionSize: 1073741824 + queryLogRetentionTime: 2592000 + queryThreadLogEnabled: true + queryThreadLogRetentionSize: 536870912 + queryThreadLogRetentionTime: 2592000 + rabbitmq: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: rabbit_user + textLogEnabled: true + textLogLevel: TRACE + textLogRetentionSize: 536870912 + textLogRetentionTime: 2592000 + timezone: UTC + traceLogEnabled: true + traceLogRetentionSize: 536870912 + traceLogRetentionTime: 2592000 + uncompressedCacheSize: 8589934592 + resources: + - diskSize: 32 + diskTypeId: network-ssd + resourcePresetId: s2.micro + cloudStorage: + - enabled: false + database: + - name: db_name + environment: PRESTABLE + formatSchema: + - name: test_schema + type: FORMAT_SCHEMA_TYPE_CAPNPROTO + uri: https://storage.yandexcloud.net/ch-data/schema.proto + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + type: CLICKHOUSE + zone: ru-central1-a + maintenanceWindow: + - type: ANYTIME + mlModel: + - name: test_model + type: ML_MODEL_TYPE_CATBOOST + uri: https://storage.yandexcloud.net/ch-data/train.csv + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + user: + - name: user + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - databaseName: db_name + quota: + - errors: 1000 + intervalDuration: 3600000 + queries: 10000 + - errors: 5000 + intervalDuration: 79800000 + queries: 50000 + settings: + - maxMemoryUsageForUser: 1000000000 + outputFormatJsonQuote64BitIntegers: true + readOverflowMode: throw + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/clickhousecluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/clickhousecluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/elasticsearchcluster.yaml b/examples-generated/mdb/v1alpha1/elasticsearchcluster.yaml new file mode 100644 index 0000000..e66df14 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/elasticsearchcluster.yaml @@ -0,0 +1,67 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ElasticsearchCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/elasticsearchcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - adminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + dataNode: + - resources: + - diskSize: 100 + diskTypeId: network-ssd + resourcePresetId: s2.micro + environment: PRESTABLE + host: + - assignPublicIp: true + name: node + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + type: DATA_NODE + zone: ru-central1-a + maintenanceWindow: + - type: ANYTIME + name: 
test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/elasticsearchcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/elasticsearchcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/greenplumcluster.yaml b/examples-generated/mdb/v1alpha1/greenplumcluster.yaml new file mode 100644 index 0000000..35e3e14 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/greenplumcluster.yaml @@ -0,0 +1,109 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GreenplumCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/greenplumcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + access: + - webSql: true + assignPublicIp: true + description: test greenplum cluster + environment: PRESTABLE + greenplumConfig: + gp_workfile_compression: "false" + max_connections: 395 + masterHostCount: 2 + masterSubcluster: + - resources: + - diskSize: 24 + diskTypeId: network-ssd + resourcePresetId: s2.micro + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + securityGroupIdsRefs: + - name: test-sg-x + segmentHostCount: 5 + segmentInHost: 1 + segmentSubcluster: + - resources: + - diskSize: 24 + diskTypeId: network-ssd + resourcePresetId: s2.micro + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + userName: admin_user + userPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + version: "6.22" + zoneId: ru-central1-a + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/greenplumcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/greenplumcluster + labels: + testing.upbound.io/example-name: test-sg-x + name: test-sg-x +spec: + forProvider: + egress: + - description: Allow outgoing traffic to members of the same security group + fromPort: 0 + protocol: ANY + toPort: 65535 + v4CidrBlocks: + - 0.0.0.0/0 + ingress: + - description: Allow incoming traffic from members of the same security group + fromPort: 0 + protocol: ANY + toPort: 65535 + v4CidrBlocks: + - 0.0.0.0/0 + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/greenplumcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/kafkacluster.yaml b/examples-generated/mdb/v1alpha1/kafkacluster.yaml new file mode 100644 index 0000000..b4f8231 --- /dev/null
+++ b/examples-generated/mdb/v1alpha1/kafkacluster.yaml @@ -0,0 +1,105 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkacluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - assignPublicIp: false + brokersCount: 1 + kafka: + - kafkaConfig: + - compressionType: COMPRESSION_TYPE_ZSTD + defaultReplicationFactor: 1 + logFlushIntervalMessages: 1024 + logFlushIntervalMs: 1000 + logFlushSchedulerIntervalMs: 1000 + logPreallocate: true + logRetentionBytes: 1073741824 + logRetentionHours: 168 + logRetentionMinutes: 10080 + logRetentionMs: 86400000 + logSegmentBytes: 134217728 + messageMaxBytes: 1048588 + numPartitions: 10 + offsetsRetentionMinutes: 10080 + replicaFetchMaxBytes: 1048576 + saslEnabledMechanisms: + - SASL_MECHANISM_SCRAM_SHA_256 + - SASL_MECHANISM_SCRAM_SHA_512 + sslCipherSuites: + - TLS_DHE_RSA_WITH_AES_128_CBC_SHA + - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + resources: + - diskSize: 32 + diskTypeId: network-ssd + resourcePresetId: s2.micro + schemaRegistry: false + version: "2.8" + zones: + - ru-central1-a + environment: PRESTABLE + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + subnetIdsRefs: + - name: foo + user: + - name: producer-application + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - allowHosts: + - host1.db.yandex.net + - host2.db.yandex.net + role: ACCESS_ROLE_PRODUCER + topicName: input + - name: worker + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - role: ACCESS_ROLE_CONSUMER + topicName: input + - role: ACCESS_ROLE_PRODUCER + topicName: output + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkacluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkacluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/kafkaconnector.yaml b/examples-generated/mdb/v1alpha1/kafkaconnector.yaml new file mode 100644 index 0000000..81073fc --- /dev/null +++ b/examples-generated/mdb/v1alpha1/kafkaconnector.yaml @@ -0,0 +1,61 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaConnector +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkaconnector + labels: + testing.upbound.io/example-name: connector + name: connector +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + connectorConfigMirrormaker: + - replicationFactor: 1 + sourceCluster: + - alias: source + externalCluster: + - bootstrapServers: somebroker1:9091,somebroker2:9091 + saslMechanism: SCRAM-SHA-512 + saslPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + saslUsername: someuser + securityProtocol: SASL_SSL + targetCluster: + - alias: target + thisCluster: + - {} + topics: data.* + name: replication + properties: + refresh.topics.enabled: "true" + tasksMax: 3 + +--- + +apiVersion: 
mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkaconnector + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - kafka: + - resources: + - diskSize: 16 + diskTypeId: network-hdd + resourcePresetId: s2.micro + version: "2.8" + zones: + - ru-central1-a + name: foo + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/mdb/v1alpha1/kafkatopic.yaml b/examples-generated/mdb/v1alpha1/kafkatopic.yaml new file mode 100644 index 0000000..c7e2c1e --- /dev/null +++ b/examples-generated/mdb/v1alpha1/kafkatopic.yaml @@ -0,0 +1,56 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaTopic +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkatopic + labels: + testing.upbound.io/example-name: events + name: events +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: events + partitions: 4 + replicationFactor: 1 + topicConfig: + - cleanupPolicy: CLEANUP_POLICY_COMPACT + compressionType: COMPRESSION_TYPE_LZ4 + deleteRetentionMs: 86400000 + fileDeleteDelayMs: 60000 + flushMessages: 128 + flushMs: 1000 + maxMessageBytes: 1048588 + minCompactionLagMs: 0 + minInsyncReplicas: 1 + preallocate: true + retentionBytes: 10737418240 + retentionMs: 604800000 + segmentBytes: 268435456 + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkatopic + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - kafka: + - resources: + - diskSize: 16 + diskTypeId: network-hdd + resourcePresetId: s2.micro + version: "2.8" + zones: + - ru-central1-a + name: foo + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/mdb/v1alpha1/kafkauser.yaml b/examples-generated/mdb/v1alpha1/kafkauser.yaml new file mode 100644 index 0000000..d40a895 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/kafkauser.yaml @@ -0,0 +1,71 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaUser +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkauser + labels: + testing.upbound.io/example-name: user_events + name: user-events +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: user-events + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - allowHosts: + - host1.db.yandex.net + - host2.db.yandex.net + role: ACCESS_ROLE_CONSUMER + topicName: events + - role: ACCESS_ROLE_PRODUCER + topicName: events + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkauser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - kafka: + - resources: + - diskSize: 16 + diskTypeId: network-hdd + resourcePresetId: s2.micro + version: "2.8" + zones: + - ru-central1-a + name: foo + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: KafkaTopic +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/kafkauser + labels: + testing.upbound.io/example-name: events + name: events +spec: + forProvider: + 
clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: events + partitions: 4 + replicationFactor: 1 diff --git a/examples-generated/mdb/v1alpha1/mongodbcluster.yaml b/examples-generated/mdb/v1alpha1/mongodbcluster.yaml new file mode 100644 index 0000000..59617f5 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/mongodbcluster.yaml @@ -0,0 +1,80 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MongodbCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterConfig: + - version: "4.2" + database: + - name: testdb + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zoneId: ru-central1-a + labels: + test_key: test_value + maintenanceWindow: + - type: ANYTIME + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resourcesMongocfg: + - diskSize: 14 + diskTypeId: network-hdd + resourcePresetId: s2.small + resourcesMongod: + - diskSize: 16 + diskTypeId: network-hdd + resourcePresetId: s2.small + resourcesMongos: + - diskSize: 14 + diskTypeId: network-hdd + resourcePresetId: s2.small + user: + - name: john + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - databaseName: testdb + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.1.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml b/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml new file mode 100644 index 0000000..9655e63 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml @@ -0,0 +1,75 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MongodbDatabase +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbdatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: testdb + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MongodbCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbdatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterConfig: + - version: "6.0" + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zoneId: ru-central1-a + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resourcesMongod: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbdatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: 
vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbdatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/mongodbuser.yaml b/examples-generated/mdb/v1alpha1/mongodbuser.yaml new file mode 100644 index 0000000..20077a8 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/mongodbuser.yaml @@ -0,0 +1,79 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MongodbUser +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbuser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: alice + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MongodbCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbuser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterConfig: + - version: "6.0" + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zoneId: ru-central1-a + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resourcesMongod: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbuser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mongodbuser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/mysqlcluster.yaml b/examples-generated/mdb/v1alpha1/mysqlcluster.yaml new file mode 100644 index 0000000..8ba4f53 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/mysqlcluster.yaml @@ -0,0 +1,62 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MySQLCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqlcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + mysqlConfig: + default_authentication_plugin: MYSQL_NATIVE_PASSWORD + innodb_print_all_deadlocks: true + max_connections: 100 + sql_mode: ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resources: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + version: "8.0" + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqlcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + 
forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqlcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/mysqldatabase.yaml b/examples-generated/mdb/v1alpha1/mysqldatabase.yaml new file mode 100644 index 0000000..052552a --- /dev/null +++ b/examples-generated/mdb/v1alpha1/mysqldatabase.yaml @@ -0,0 +1,74 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MySQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: testdb + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MySQLCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resources: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + version: "8.0" + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/mysqluser.yaml b/examples-generated/mdb/v1alpha1/mysqluser.yaml new file mode 100644 index 0000000..ae82095 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/mysqluser.yaml @@ -0,0 +1,98 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MySQLUser +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqluser + labels: + testing.upbound.io/example-name: john + name: john +spec: + forProvider: + authenticationPlugin: SHA256_PASSWORD + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + connectionLimits: + - maxConnectionsPerHour: 30 + maxQuestionsPerHour: 10 + maxUpdatesPerHour: 20 + maxUserConnections: 40 + globalPermissions: + - PROCESS + name: john + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: testdb + roles: + - ALL + - databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: new_testdb + roles: + - ALL + - INSERT + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: MySQLCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resources: + - diskSize: 
16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + version: "8.0" + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/mysqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/postgresqlcluster.yaml b/examples-generated/mdb/v1alpha1/postgresqlcluster.yaml new file mode 100644 index 0000000..fbeda4c --- /dev/null +++ b/examples-generated/mdb/v1alpha1/postgresqlcluster.yaml @@ -0,0 +1,68 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PostgresqlCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqlcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - postgresqlConfig: + autovacuum_vacuum_scale_factor: 0.34 + default_transaction_isolation: TRANSACTION_ISOLATION_READ_COMMITTED + enable_parallel_hash: true + max_connections: 395 + shared_preload_libraries: SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN,SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN + resources: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + version: 15 + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + maintenanceWindow: + - day: SAT + hour: 12 + type: WEEKLY + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqlcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqlcluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/postgresqldatabase.yaml b/examples-generated/mdb/v1alpha1/postgresqldatabase.yaml new file mode 100644 index 0000000..05d5e9a --- /dev/null +++ b/examples-generated/mdb/v1alpha1/postgresqldatabase.yaml @@ -0,0 +1,104 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PostgresqlDatabase +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + extension: + - name: uuid-ossp + - name: xml2 + lcCollate: en_US.UTF-8 + lcType: en_US.UTF-8 + name: testdb + ownerSelector: + matchLabels: + testing.upbound.io/example-name: alice + +--- + +apiVersion: 
mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PostgresqlCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - resources: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + version: 15 + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PostgresqlUser +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + name: alice + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqldatabase + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/postgresqluser.yaml b/examples-generated/mdb/v1alpha1/postgresqluser.yaml new file mode 100644 index 0000000..c7ce487 --- /dev/null +++ b/examples-generated/mdb/v1alpha1/postgresqluser.yaml @@ -0,0 +1,83 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PostgresqlUser +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + clusterIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + connLimit: 50 + name: alice + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + settings: + default_transaction_isolation: read committed + log_min_duration_statement: 5000 + +--- + +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PostgresqlCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - resources: + - diskSize: 16 + diskTypeId: network-ssd + resourcePresetId: s2.micro + version: 15 + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/postgresqluser + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + 
networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/rediscluster.yaml b/examples-generated/mdb/v1alpha1/rediscluster.yaml new file mode 100644 index 0000000..f14651f --- /dev/null +++ b/examples-generated/mdb/v1alpha1/rediscluster.yaml @@ -0,0 +1,63 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: RedisCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/rediscluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + config: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + version: "6.2" + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + maintenanceWindow: + - type: ANYTIME + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resources: + - diskSize: 16 + resourcePresetId: hm1.nano + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/rediscluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/rediscluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/mdb/v1alpha1/sqlservercluster.yaml b/examples-generated/mdb/v1alpha1/sqlservercluster.yaml new file mode 100644 index 0000000..32a280f --- /dev/null +++ b/examples-generated/mdb/v1alpha1/sqlservercluster.yaml @@ -0,0 +1,136 @@ +apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SqlserverCluster +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/sqlservercluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + backupWindowStart: + - hours: 20 + minutes: 30 + database: + - name: db_name_a + - name: db_name + - name: db_name_b + environment: PRESTABLE + host: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + zone: ru-central1-a + hostGroupIds: + - host_group_1 + - host_group_2 + labels: + test_key: test_value + name: test + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + resources: + - diskSize: 20 + diskTypeId: network-ssd + resourcePresetId: s2.small + securityGroupIdsRefs: + - name: test-sg-x + sqlserverConfig: + fill_factor_percent: 49 + optimize_for_ad_hoc_workloads: true + user: + - name: bob + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + - name: alice + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - databaseName: db_name + roles: + - DDLADMIN + - name: chuck + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + permission: + - databaseName: db_name_a + roles: + - OWNER + - databaseName: db_name + roles: + - OWNER + - DDLADMIN + - databaseName: db_name_b + roles: + - OWNER + - DDLADMIN + version: 2016sp2std + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: 
mdb/v1alpha1/sqlservercluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/sqlservercluster + labels: + testing.upbound.io/example-name: test-sg-x + name: test-sg-x +spec: + forProvider: + egress: + - description: Allow outgoing traffic to members of the same security group + fromPort: 0 + protocol: ANY + toPort: 65535 + v4CidrBlocks: + - 0.0.0.0/0 + ingress: + - description: Allow incoming traffic from members of the same security group + fromPort: 0 + protocol: ANY + toPort: 65535 + v4CidrBlocks: + - 0.0.0.0/0 + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: mdb/v1alpha1/sqlservercluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/message/v1alpha1/queue.yaml b/examples-generated/message/v1alpha1/queue.yaml new file mode 100644 index 0000000..9d06698 --- /dev/null +++ b/examples-generated/message/v1alpha1/queue.yaml @@ -0,0 +1,19 @@ +apiVersion: message.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Queue +metadata: + annotations: + meta.upbound.io/example-id: message/v1alpha1/queue + labels: + testing.upbound.io/example-name: example_queue + name: example-queue +spec: + forProvider: + messageRetentionSeconds: 1209600 + name: ymq_terraform_example + receiveWaitTimeSeconds: 20 + redrivePolicy: |- + ${jsonencode({ + deadLetterTargetArn = yandex_message_queue.example_deadletter_queue.arn + maxReceiveCount = 3 + })} + visibilityTimeoutSeconds: 600 diff --git a/examples-generated/monitoring/v1alpha1/dashboard.yaml b/examples-generated/monitoring/v1alpha1/dashboard.yaml new file mode 100644 index 0000000..e38b7e4 --- /dev/null +++ b/examples-generated/monitoring/v1alpha1/dashboard.yaml @@ -0,0 +1,128 @@ +apiVersion: monitoring.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Dashboard +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1alpha1/dashboard + labels: + testing.upbound.io/example-name: my-dashboard + name: my-dashboard +spec: + forProvider: + description: Description + labels: + a: b + name: local-id-resource + parametrization: + - parameters: + - custom: + - defaultValues: + - "1" + - "2" + multiselectable: true + values: + - "1" + - "2" + - "3" + description: param1 description + hidden: false + id: param1 + title: title + - hidden: true + id: param2 + labelValues: + - defaultValues: + - "1" + - "2" + labelKey: key + multiselectable: true + selectors: a=b + - hidden: true + id: param3 + text: + - defaultValue: abc + selectors: a=b + title: My title + widgets: + - position: + - h: 1 + w: 1 + x: 4 + "y": 4 + text: + - text: text here + - chart: + - chartId: chart1id + description: chart description + displayLegend: true + freeze: FREEZE_DURATION_HOUR + nameHidingSettings: + - names: + - a + - b + positive: true + queries: + - downsampling: + - disabled: false + gapFilling: GAP_FILLING_NULL + gridAggregation: GRID_AGGREGATION_COUNT + maxPoints: 100 + target: + - hidden: true + query: '{service=monitoring}' + textMode: true + seriesOverrides: + - name: name + settings: + - color: colorValue + growDown: true + name: 
series_overrides name + stackName: stack name + type: SERIES_VISUALIZATION_TYPE_LINE + yaxisPosition: YAXIS_POSITION_LEFT + title: title for chart + visualizationSettings: + - aggregation: SERIES_AGGREGATION_AVG + colorSchemeSettings: + - gradient: + - greenValue: "11" + redValue: "22" + violetValue: "33" + yellowValue: "44" + heatmapSettings: + - greenValue: "1" + redValue: "2" + violetValue: "3" + yellowValue: "4" + interpolate: INTERPOLATE_LEFT + normalize: true + showLabels: true + title: visualization_settings title + type: VISUALIZATION_TYPE_POINTS + yaxisSettings: + - left: + - max: "111" + min: "11" + precision: 3 + title: yaxis_settings left title + type: YAXIS_TYPE_LOGARITHMIC + unitFormat: UNIT_CELSIUS + right: + - max: "22" + min: "2" + precision: 2 + title: yaxis_settings right title + type: YAXIS_TYPE_LOGARITHMIC + unitFormat: UNIT_NONE + position: + - h: 100 + w: 100 + x: 6 + "y": 6 + - position: + - h: 1 + w: 1 + x: 1 + "y": 1 + title: + - size: TITLE_SIZE_XS + text: title here diff --git a/examples-generated/organizationmanager/v1alpha1/group.yaml b/examples-generated/organizationmanager/v1alpha1/group.yaml new file mode 100644 index 0000000..8b0534e --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/group.yaml @@ -0,0 +1,13 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/group + labels: + testing.upbound.io/example-name: group + name: group +spec: + forProvider: + description: My new Group + name: my-group + organizationId: sdf4*********3fr diff --git a/examples-generated/organizationmanager/v1alpha1/groupiammember.yaml b/examples-generated/organizationmanager/v1alpha1/groupiammember.yaml new file mode 100644 index 0000000..23ce116 --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/groupiammember.yaml @@ -0,0 +1,15 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GroupIAMMember +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/groupiammember + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + groupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + member: userAccount:user_id + role: editor diff --git a/examples-generated/organizationmanager/v1alpha1/groupmembership.yaml b/examples-generated/organizationmanager/v1alpha1/groupmembership.yaml new file mode 100644 index 0000000..3ea687b --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/groupmembership.yaml @@ -0,0 +1,15 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GroupMembership +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/groupmembership + labels: + testing.upbound.io/example-name: group + name: group +spec: + forProvider: + groupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + members: + - xdf********123 diff --git a/examples-generated/organizationmanager/v1alpha1/organizationiambinding.yaml b/examples-generated/organizationmanager/v1alpha1/organizationiambinding.yaml new file mode 100644 index 0000000..8ea1034 --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/organizationiambinding.yaml @@ -0,0 +1,14 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: OrganizationIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: 
organizationmanager/v1alpha1/organizationiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + organizationId: some_organization_id + role: editor diff --git a/examples-generated/organizationmanager/v1alpha1/organizationiammember.yaml b/examples-generated/organizationmanager/v1alpha1/organizationiammember.yaml new file mode 100644 index 0000000..a9ae925 --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/organizationiammember.yaml @@ -0,0 +1,13 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: OrganizationIAMMember +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/organizationiammember + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + member: userAccount:user_id + organizationId: some_organization_id + role: editor diff --git a/examples-generated/organizationmanager/v1alpha1/osloginsettings.yaml b/examples-generated/organizationmanager/v1alpha1/osloginsettings.yaml new file mode 100644 index 0000000..71c65b4 --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/osloginsettings.yaml @@ -0,0 +1,16 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: OsLoginSettings +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/osloginsettings + labels: + testing.upbound.io/example-name: my_os_login_settings + name: my-os-login-settings +spec: + forProvider: + organizationId: sdf4*********3fr + sshCertificateSettings: + - enabled: true + userSshKeySettings: + - allowManageOwnKeys: true + enabled: true diff --git a/examples-generated/organizationmanager/v1alpha1/samlfederation.yaml b/examples-generated/organizationmanager/v1alpha1/samlfederation.yaml new file mode 100644 index 0000000..d1f843b --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/samlfederation.yaml @@ -0,0 +1,16 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SAMLFederation +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/samlfederation + labels: + testing.upbound.io/example-name: federation + name: federation +spec: + forProvider: + description: My new SAML federation + issuer: my-issuer + name: my-federation + organizationId: sdf4*********3fr + ssoBinding: POST + ssoUrl: https://my-sso.url diff --git a/examples-generated/organizationmanager/v1alpha1/samlfederationuseraccount.yaml b/examples-generated/organizationmanager/v1alpha1/samlfederationuseraccount.yaml new file mode 100644 index 0000000..9422afe --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/samlfederationuseraccount.yaml @@ -0,0 +1,12 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SAMLFederationUserAccount +metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/samlfederationuseraccount + labels: + testing.upbound.io/example-name: account + name: account +spec: + forProvider: + federationId: some_federation_id + nameId: example@example.org diff --git a/examples-generated/organizationmanager/v1alpha1/usersshkey.yaml b/examples-generated/organizationmanager/v1alpha1/usersshkey.yaml new file mode 100644 index 0000000..fd073bc --- /dev/null +++ b/examples-generated/organizationmanager/v1alpha1/usersshkey.yaml @@ -0,0 +1,13 @@ +apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: UserSSHKey 
+metadata: + annotations: + meta.upbound.io/example-id: organizationmanager/v1alpha1/usersshkey + labels: + testing.upbound.io/example-name: my_user_ssh_key + name: my-user-ssh-key +spec: + forProvider: + data: ssh_key_data + organizationId: some_organization_id + subjectId: some_subject_id diff --git a/examples-generated/resourcemanager/v1alpha1/cloud.yaml b/examples-generated/resourcemanager/v1alpha1/cloud.yaml new file mode 100644 index 0000000..837b1a6 --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/cloud.yaml @@ -0,0 +1,11 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Cloud +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/cloud + labels: + testing.upbound.io/example-name: cloud1 + name: cloud1 +spec: + forProvider: + organizationId: my_organization_id diff --git a/examples-generated/resourcemanager/v1alpha1/cloudiambinding.yaml b/examples-generated/resourcemanager/v1alpha1/cloudiambinding.yaml new file mode 100644 index 0000000..d0bac17 --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/cloudiambinding.yaml @@ -0,0 +1,16 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CloudIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/cloudiambinding + labels: + testing.upbound.io/example-name: admin + name: admin +spec: + forProvider: + cloudIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_cloud + members: + - userAccount:some_user_id + role: editor diff --git a/examples-generated/resourcemanager/v1alpha1/cloudiammember.yaml b/examples-generated/resourcemanager/v1alpha1/cloudiammember.yaml new file mode 100644 index 0000000..55a1ed6 --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/cloudiammember.yaml @@ -0,0 +1,15 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CloudIAMMember +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/cloudiammember + labels: + testing.upbound.io/example-name: admin + name: admin +spec: + forProvider: + cloudIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_cloud + member: userAccount:user_id + role: editor diff --git a/examples-generated/resourcemanager/v1alpha1/folder.yaml b/examples-generated/resourcemanager/v1alpha1/folder.yaml new file mode 100644 index 0000000..ff43235 --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/folder.yaml @@ -0,0 +1,13 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Folder +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/folder + labels: + testing.upbound.io/example-name: folder1 + name: folder1 +spec: + forProvider: + cloudIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/resourcemanager/v1alpha1/folderiambinding.yaml b/examples-generated/resourcemanager/v1alpha1/folderiambinding.yaml new file mode 100644 index 0000000..ca81b2e --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/folderiambinding.yaml @@ -0,0 +1,16 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/folderiambinding + labels: + testing.upbound.io/example-name: admin + name: admin +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: 
yandex_resourcemanager_folder + members: + - userAccount:some_user_id + role: editor diff --git a/examples-generated/resourcemanager/v1alpha1/folderiammember.yaml b/examples-generated/resourcemanager/v1alpha1/folderiammember.yaml new file mode 100644 index 0000000..8363f0e --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/folderiammember.yaml @@ -0,0 +1,15 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMMember +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/folderiammember + labels: + testing.upbound.io/example-name: admin + name: admin +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager + member: userAccount:user_id + role: editor diff --git a/examples-generated/resourcemanager/v1alpha1/folderiampolicy.yaml b/examples-generated/resourcemanager/v1alpha1/folderiampolicy.yaml new file mode 100644 index 0000000..5ff1aa1 --- /dev/null +++ b/examples-generated/resourcemanager/v1alpha1/folderiampolicy.yaml @@ -0,0 +1,14 @@ +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMPolicy +metadata: + annotations: + meta.upbound.io/example-id: resourcemanager/v1alpha1/folderiampolicy + labels: + testing.upbound.io/example-name: folder_admin_policy + name: folder-admin-policy +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_folder + policyData: ${data.yandex_iam_policy.admin.policy_data} diff --git a/examples-generated/serverless/v1alpha1/container.yaml b/examples-generated/serverless/v1alpha1/container.yaml new file mode 100644 index 0000000..1581fa8 --- /dev/null +++ b/examples-generated/serverless/v1alpha1/container.yaml @@ -0,0 +1,39 @@ +apiVersion: serverless.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: serverless/v1alpha1/container + labels: + testing.upbound.io/example-name: test-container + name: test-container +spec: + forProvider: + coreFraction: 100 + cores: 1 + description: any description + executionTimeout: 15s + image: + - url: cr.yandex/yc/test-image:v1 + logOptions: + - logGroupId: e2392vo6d1bne2aeq9fr + minLevel: ERROR + memory: 256 + mounts: + - ephemeralDisk: + - sizeGb: 5 + mountPointPath: /mount/point + name: some_name + provisionPolicy: + - minInstances: 1 + secrets: + - environmentVariable: ENV_VARIABLE + idSelector: + matchLabels: + testing.upbound.io/example-name: secret + key: secret-key + versionIdSelector: + matchLabels: + testing.upbound.io/example-name: secret_version + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/serverless/v1alpha1/containeriambinding.yaml b/examples-generated/serverless/v1alpha1/containeriambinding.yaml new file mode 100644 index 0000000..46658b8 --- /dev/null +++ b/examples-generated/serverless/v1alpha1/containeriambinding.yaml @@ -0,0 +1,14 @@ +apiVersion: serverless.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ContainerIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: serverless/v1alpha1/containeriambinding + labels: + testing.upbound.io/example-name: container-iam + name: container-iam +spec: + forProvider: + containerId: your-container-id + members: + - system:allUsers + role: serverless.containers.invoker diff --git a/examples-generated/storage/v1alpha1/bucket.yaml b/examples-generated/storage/v1alpha1/bucket.yaml new file mode 100644 index 0000000..af28b15 
--- /dev/null +++ b/examples-generated/storage/v1alpha1/bucket.yaml @@ -0,0 +1,11 @@ +apiVersion: storage.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Bucket +metadata: + annotations: + meta.upbound.io/example-id: storage/v1alpha1/bucket + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + bucket: tf-test-bucket diff --git a/examples-generated/storage/v1alpha1/object.yaml b/examples-generated/storage/v1alpha1/object.yaml new file mode 100644 index 0000000..4f880ee --- /dev/null +++ b/examples-generated/storage/v1alpha1/object.yaml @@ -0,0 +1,17 @@ +apiVersion: storage.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: storage/v1alpha1/object + labels: + testing.upbound.io/example-name: cute-cat-picture + name: cute-cat-picture +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + key: cute-cat + source: /images/cats/cute-cat.jpg + tags: + test: value diff --git a/examples-generated/sws/v1alpha1/securityprofile.yaml b/examples-generated/sws/v1alpha1/securityprofile.yaml new file mode 100644 index 0000000..68b265a --- /dev/null +++ b/examples-generated/sws/v1alpha1/securityprofile.yaml @@ -0,0 +1,17 @@ +apiVersion: sws.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityProfile +metadata: + annotations: + meta.upbound.io/example-id: sws/v1alpha1/securityprofile + labels: + testing.upbound.io/example-name: demo-profile-simple + name: demo-profile-simple +spec: + forProvider: + defaultAction: ALLOW + name: demo-profile-simple + securityRule: + - name: smart-protection + priority: 99999 + smartProtection: + - mode: API diff --git a/examples-generated/vpc/v1alpha1/address.yaml b/examples-generated/vpc/v1alpha1/address.yaml new file mode 100644 index 0000000..ebdb3d5 --- /dev/null +++ b/examples-generated/vpc/v1alpha1/address.yaml @@ -0,0 +1,13 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Address +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/address + labels: + testing.upbound.io/example-name: addr + name: addr +spec: + forProvider: + externalIpv4Address: + - zoneId: ru-central1-a + name: exampleAddress diff --git a/examples-generated/vpc/v1alpha1/defaultsecuritygroup.yaml b/examples-generated/vpc/v1alpha1/defaultsecuritygroup.yaml new file mode 100644 index 0000000..461419d --- /dev/null +++ b/examples-generated/vpc/v1alpha1/defaultsecuritygroup.yaml @@ -0,0 +1,51 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DefaultSecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/defaultsecuritygroup + labels: + testing.upbound.io/example-name: default-sg + name: default-sg +spec: + forProvider: + description: description for default security group + egress: + - description: rule2 description + fromPort: 8090 + protocol: ANY + toPort: 8099 + v4CidrBlocks: + - 10.0.1.0/24 + - 10.0.2.0/24 + - description: rule3 description + fromPort: 8090 + protocol: UDP + toPort: 8099 + v4CidrBlocks: + - 10.0.1.0/24 + ingress: + - description: rule1 description + port: 8080 + protocol: TCP + v4CidrBlocks: + - 10.0.1.0/24 + - 10.0.2.0/24 + labels: + my-label: my-label-value + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/defaultsecuritygroup + labels: + testing.upbound.io/example-name: lab-net + 
name: lab-net +spec: + forProvider: + name: lab-network diff --git a/examples-generated/vpc/v1alpha1/gateway.yaml b/examples-generated/vpc/v1alpha1/gateway.yaml new file mode 100644 index 0000000..f205532 --- /dev/null +++ b/examples-generated/vpc/v1alpha1/gateway.yaml @@ -0,0 +1,13 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Gateway +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/gateway + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + name: foobar + sharedEgressGateway: + - {} diff --git a/examples-generated/vpc/v1alpha1/network.yaml b/examples-generated/vpc/v1alpha1/network.yaml new file mode 100644 index 0000000..0d43597 --- /dev/null +++ b/examples-generated/vpc/v1alpha1/network.yaml @@ -0,0 +1,11 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/network + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + name: foobar diff --git a/examples-generated/vpc/v1alpha1/privateendpoint.yaml b/examples-generated/vpc/v1alpha1/privateendpoint.yaml new file mode 100644 index 0000000..b276e8b --- /dev/null +++ b/examples-generated/vpc/v1alpha1/privateendpoint.yaml @@ -0,0 +1,58 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PrivateEndpoint +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/privateendpoint + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + description: description for private endpoint + dnsOptions: + - privateDnsRecordsEnabled: true + endpointAddress: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-subnet-a + labels: + my-label: my-label-value + name: object-storage-private-endpoint + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net + objectStorage: + - {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/privateendpoint + labels: + testing.upbound.io/example-name: lab-net + name: lab-net +spec: + forProvider: + name: lab-network + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/privateendpoint + labels: + testing.upbound.io/example-name: lab-subnet-a + name: lab-subnet-a +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net + v4CidrBlocks: + - 10.2.0.0/16 + zone: ru-central1-a diff --git a/examples-generated/vpc/v1alpha1/routetable.yaml b/examples-generated/vpc/v1alpha1/routetable.yaml new file mode 100644 index 0000000..4dc5521 --- /dev/null +++ b/examples-generated/vpc/v1alpha1/routetable.yaml @@ -0,0 +1,50 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: RouteTable +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/routetable + labels: + testing.upbound.io/example-name: lab-rt-a + name: lab-rt-a +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net + staticRoute: + - destinationPrefix: 10.2.0.0/16 + nextHopAddress: 172.16.10.10 + - destinationPrefix: 0.0.0.0/0 + gatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: egress-gateway + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Gateway +metadata: + annotations: + meta.upbound.io/example-id: 
vpc/v1alpha1/routetable + labels: + testing.upbound.io/example-name: egress-gateway + name: egress-gateway +spec: + forProvider: + name: egress-gateway + sharedEgressGateway: + - {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/routetable + labels: + testing.upbound.io/example-name: lab-net + name: lab-net +spec: + forProvider: + name: lab-network diff --git a/examples-generated/vpc/v1alpha1/securitygroup.yaml b/examples-generated/vpc/v1alpha1/securitygroup.yaml new file mode 100644 index 0000000..6030eed --- /dev/null +++ b/examples-generated/vpc/v1alpha1/securitygroup.yaml @@ -0,0 +1,52 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/securitygroup + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + description: description for my security group + egress: + - description: rule2 description + fromPort: 8090 + protocol: ANY + toPort: 8099 + v4CidrBlocks: + - 10.0.1.0/24 + - 10.0.2.0/24 + - description: rule3 description + fromPort: 8090 + protocol: UDP + toPort: 8099 + v4CidrBlocks: + - 10.0.1.0/24 + ingress: + - description: rule1 description + port: 8080 + protocol: TCP + v4CidrBlocks: + - 10.0.1.0/24 + - 10.0.2.0/24 + labels: + my-label: my-label-value + name: My security group + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/securitygroup + labels: + testing.upbound.io/example-name: lab-net + name: lab-net +spec: + forProvider: + name: lab-network diff --git a/examples-generated/vpc/v1alpha1/securitygrouprule.yaml b/examples-generated/vpc/v1alpha1/securitygrouprule.yaml new file mode 100644 index 0000000..76a9d39 --- /dev/null +++ b/examples-generated/vpc/v1alpha1/securitygrouprule.yaml @@ -0,0 +1,54 @@ +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityGroupRule +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/securitygrouprule + labels: + testing.upbound.io/example-name: rule1 + name: rule1 +spec: + forProvider: + description: rule1 description + direction: ingress + port: 8080 + protocol: TCP + securityGroupBindingSelector: + matchLabels: + testing.upbound.io/example-name: group1 + v4CidrBlocks: + - 10.0.1.0/24 + - 10.0.2.0/24 + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/securitygrouprule + labels: + testing.upbound.io/example-name: lab-net + name: lab-net +spec: + forProvider: + name: lab-network + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/securitygrouprule + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + description: description for my security group + labels: + my-label: my-label-value + name: My security group + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net diff --git a/examples-generated/vpc/v1alpha1/subnet.yaml b/examples-generated/vpc/v1alpha1/subnet.yaml new file mode 100644 index 0000000..590025e --- /dev/null +++ b/examples-generated/vpc/v1alpha1/subnet.yaml @@ -0,0 +1,30 @@ +apiVersion: 
vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/subnet + labels: + testing.upbound.io/example-name: lab-subnet-a + name: lab-subnet-a +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: lab-net + v4CidrBlocks: + - 10.2.0.0/16 + zone: ru-central1-a + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: vpc/v1alpha1/subnet + labels: + testing.upbound.io/example-name: lab-net + name: lab-net +spec: + forProvider: + name: lab-network diff --git a/examples-generated/yandex/v1alpha1/function.yaml b/examples-generated/yandex/v1alpha1/function.yaml new file mode 100644 index 0000000..37b8a40 --- /dev/null +++ b/examples-generated/yandex/v1alpha1/function.yaml @@ -0,0 +1,47 @@ +apiVersion: yandex.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Function +metadata: + annotations: + meta.upbound.io/example-id: yandex/v1alpha1/function + labels: + testing.upbound.io/example-name: test-function + name: test-function +spec: + forProvider: + asyncInvocation: + - retriesCount: "3" + serviceAccountId: ajeihp9qsfg2l6f838kk + ymqFailureTarget: + - arn: yrn:yc:ymq:ru-central1:b1glraqqa1i7tmh9hsfp:fail + serviceAccountId: ajeqr0pjpbrkovcqb76m + ymqSuccessTarget: + - arn: yrn:yc:ymq:ru-central1:b1glraqqa1i7tmh9hsfp:success + serviceAccountId: ajeqr0pjpbrkovcqb76m + content: + - zipFilename: function.zip + description: any description + entrypoint: main + executionTimeout: "10" + logOptions: + - logGroupId: e2392vo6d1bne2aeq9fr + minLevel: ERROR + memory: "128" + mounts: + - ephemeralDisk: + - sizeGb: 32 + name: mnt + name: some_name + runtime: python37 + secrets: + - environmentVariable: ENV_VARIABLE + idSelector: + matchLabels: + testing.upbound.io/example-name: secret + key: secret-key + versionIdSelector: + matchLabels: + testing.upbound.io/example-name: secret_version + serviceAccountId: are1service2account3id + tags: + - my_tag + userHash: any_user_defined_string diff --git a/examples-generated/ydb/v1alpha1/databasededicated.yaml b/examples-generated/ydb/v1alpha1/databasededicated.yaml new file mode 100644 index 0000000..4302ea6 --- /dev/null +++ b/examples-generated/ydb/v1alpha1/databasededicated.yaml @@ -0,0 +1,30 @@ +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DatabaseDedicated +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/databasededicated + labels: + testing.upbound.io/example-name: database1 + name: database1 +spec: + forProvider: + deletionProtection: true + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + location: + - region: + - id: ru-central1 + name: test-ydb-dedicated + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: my-inst-group-network + resourcePresetId: medium + scalePolicy: + - fixedScale: + - size: 1 + storageConfig: + - groupCount: 1 + storageTypeId: ssd + subnetIdsRefs: + - name: my-inst-group-subnet diff --git a/examples-generated/ydb/v1alpha1/databaseiambinding.yaml b/examples-generated/ydb/v1alpha1/databaseiambinding.yaml new file mode 100644 index 0000000..5996c7b --- /dev/null +++ b/examples-generated/ydb/v1alpha1/databaseiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DatabaseIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/databaseiambinding + labels: + 
testing.upbound.io/example-name: viewer + name: viewer +spec: + forProvider: + databaseIdSelector: + matchLabels: + testing.upbound.io/example-name: database1 + role: ydb.viewer + serviceAccountRef: + - name: example + +--- + +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DatabaseServerless +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/databaseiambinding + labels: + testing.upbound.io/example-name: database1 + name: database1 +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + name: test-ydb-serverless diff --git a/examples-generated/ydb/v1alpha1/databaseserverless.yaml b/examples-generated/ydb/v1alpha1/databaseserverless.yaml new file mode 100644 index 0000000..06f35e7 --- /dev/null +++ b/examples-generated/ydb/v1alpha1/databaseserverless.yaml @@ -0,0 +1,15 @@ +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DatabaseServerless +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/databaseserverless + labels: + testing.upbound.io/example-name: database1 + name: database1 +spec: + forProvider: + deletionProtection: true + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + name: test-ydb-serverless diff --git a/examples-generated/ydb/v1alpha1/table.yaml b/examples-generated/ydb/v1alpha1/table.yaml new file mode 100644 index 0000000..d8826ea --- /dev/null +++ b/examples-generated/ydb/v1alpha1/table.yaml @@ -0,0 +1,29 @@ +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Table +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/table + labels: + testing.upbound.io/example-name: test_table + name: test-table +spec: + forProvider: + column: + - name: a + notNull: true + type: Utf8 + - name: b + notNull: true + type: Uint32 + - name: c + notNull: false + type: Int32 + - name: d + type: Timestamp + connectionStringSelector: + matchLabels: + testing.upbound.io/example-name: database1 + path: test_dir/test_table_3_col + primaryKey: + - a + - b diff --git a/examples-generated/ydb/v1alpha1/topic.yaml b/examples-generated/ydb/v1alpha1/topic.yaml new file mode 100644 index 0000000..1bfb5e1 --- /dev/null +++ b/examples-generated/ydb/v1alpha1/topic.yaml @@ -0,0 +1,40 @@ +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/topic + labels: + testing.upbound.io/example-name: topic + name: topic +spec: + forProvider: + consumer: + - name: consumer-name + startingMessageTimestampMs: 0 + supportedCodecs: + - raw + - gzip + databaseEndpointSelector: + matchLabels: + testing.upbound.io/example-name: database_name + name: topic-test + partitionsCount: 1 + retentionPeriodMs: 2000000 + supportedCodecs: + - raw + - gzip + +--- + +apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DatabaseServerless +metadata: + annotations: + meta.upbound.io/example-id: ydb/v1alpha1/topic + labels: + testing.upbound.io/example-name: database_name + name: database-name +spec: + forProvider: + locationId: ru-central1 + name: database-name diff --git a/internal/controller/alb/backendgroup/zz_controller.go b/internal/controller/alb/backendgroup/zz_controller.go index b311fa0..d47e644 100755 --- a/internal/controller/alb/backendgroup/zz_controller.go +++ b/internal/controller/alb/backendgroup/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package backendgroup @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles BackendGroup managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.BackendGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_backend_group"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.BackendGroup_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_backend_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.BackendGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/alb/httprouter/zz_controller.go b/internal/controller/alb/httprouter/zz_controller.go index 99e463e..5ad47ec 100755 --- a/internal/controller/alb/httprouter/zz_controller.go +++ b/internal/controller/alb/httprouter/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package httprouter @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles HTTPRouter managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.HTTPRouter_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_http_router"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.HTTPRouter_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_http_router"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.HTTPRouter_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/alb/loadbalancer/zz_controller.go b/internal/controller/alb/loadbalancer/zz_controller.go index 6585a5f..3524e30 100755 --- a/internal/controller/alb/loadbalancer/zz_controller.go +++ b/internal/controller/alb/loadbalancer/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package loadbalancer @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles LoadBalancer managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.LoadBalancer_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_load_balancer"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.LoadBalancer_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_load_balancer"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.LoadBalancer_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/alb/targetgroup/zz_controller.go b/internal/controller/alb/targetgroup/zz_controller.go index 15c5c18..45ebb26 100755 --- a/internal/controller/alb/targetgroup/zz_controller.go +++ b/internal/controller/alb/targetgroup/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package targetgroup @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles TargetGroup managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TargetGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_target_group"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TargetGroup_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_target_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TargetGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/alb/virtualhost/zz_controller.go b/internal/controller/alb/virtualhost/zz_controller.go index 1657302..407f710 100755 --- a/internal/controller/alb/virtualhost/zz_controller.go +++ b/internal/controller/alb/virtualhost/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package virtualhost @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles VirtualHost managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.VirtualHost_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_virtual_host"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.VirtualHost_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_alb_virtual_host"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.VirtualHost_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/api/gateway/zz_controller.go b/internal/controller/api/gateway/zz_controller.go index 792f93d..ded5ec9 100755 --- a/internal/controller/api/gateway/zz_controller.go +++ b/internal/controller/api/gateway/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package gateway @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/api/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Gateway managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Gateway_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_api_gateway"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Gateway_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_api_gateway"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Gateway_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/audit/trailstrail/zz_controller.go b/internal/controller/audit/trailstrail/zz_controller.go new file mode 100755 index 0000000..89c26dd --- /dev/null +++ b/internal/controller/audit/trailstrail/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package trailstrail + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/audit/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles TrailsTrail managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.TrailsTrail_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TrailsTrail_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TrailsTrail_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_audit_trails_trail"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TrailsTrail_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.TrailsTrail + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.TrailsTrail{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.TrailsTrail") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.TrailsTrailList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.TrailsTrailList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TrailsTrail_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.TrailsTrail{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/backup/policy/zz_controller.go b/internal/controller/backup/policy/zz_controller.go index ca6b54d..3f1fb25 100755 --- a/internal/controller/backup/policy/zz_controller.go +++ b/internal/controller/backup/policy/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package policy @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/backup/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Policy managed resources. 
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_backup_policy"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Policy_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_backup_policy"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Policy_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/backup/policybindings/zz_controller.go b/internal/controller/backup/policybindings/zz_controller.go index fe8e732..80f5a53 100755 --- a/internal/controller/backup/policybindings/zz_controller.go +++ b/internal/controller/backup/policybindings/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package policybindings @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/backup/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles PolicyBindings managed resources. 
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PolicyBindings_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_backup_policy_bindings"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PolicyBindings_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_backup_policy_bindings"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PolicyBindings_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/billing/cloudbinding/zz_controller.go b/internal/controller/billing/cloudbinding/zz_controller.go new file mode 100755 index 0000000..90d2d20 --- /dev/null +++ b/internal/controller/billing/cloudbinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package cloudbinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/billing/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles CloudBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.CloudBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.CloudBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CloudBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_billing_cloud_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CloudBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.CloudBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.CloudBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.CloudBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CloudBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CloudBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.CloudBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.CloudBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/cdn/origingroup/zz_controller.go b/internal/controller/cdn/origingroup/zz_controller.go index 2ba7ae8..4cdaf9a 100755 --- a/internal/controller/cdn/origingroup/zz_controller.go +++ b/internal/controller/cdn/origingroup/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package origingroup @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles OriginGroup managed resources. 
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.OriginGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cdn_origin_group"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.OriginGroup_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cdn_origin_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.OriginGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/cdn/resource/zz_controller.go b/internal/controller/cdn/resource/zz_controller.go index 9f0b776..82e7831 100755 --- a/internal/controller/cdn/resource/zz_controller.go +++ b/internal/controller/cdn/resource/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package resource @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Resource managed resources. 
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Resource_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cdn_resource"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Resource_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cdn_resource"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Resource_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/cm/certificate/zz_controller.go b/internal/controller/cm/certificate/zz_controller.go new file mode 100755 index 0000000..7cafba6 --- /dev/null +++ b/internal/controller/cm/certificate/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package certificate + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cm/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Certificate managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Certificate_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Certificate_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Certificate_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cm_certificate"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Certificate_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Certificate + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Certificate{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Certificate") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CertificateList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CertificateList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Certificate_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Certificate{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/disk/zz_controller.go b/internal/controller/compute/disk/zz_controller.go new file mode 100755 index 0000000..e83cc75 --- /dev/null +++ b/internal/controller/compute/disk/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package disk + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Disk managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Disk_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Disk_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Disk_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_disk"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Disk_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Disk + // if they're enabled. 
+ if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Disk{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Disk") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DiskList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DiskList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Disk_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Disk{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/diskiambinding/zz_controller.go b/internal/controller/compute/diskiambinding/zz_controller.go new file mode 100755 index 0000000..a69d8d9 --- /dev/null +++ b/internal/controller/compute/diskiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package diskiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DiskIAMBinding managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DiskIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DiskIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DiskIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_disk_iam_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DiskIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DiskIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DiskIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DiskIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DiskIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DiskIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DiskIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DiskIAMBinding{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/diskplacementgroup/zz_controller.go b/internal/controller/compute/diskplacementgroup/zz_controller.go new file mode 100755 index 0000000..6d7ec3b --- /dev/null +++ b/internal/controller/compute/diskplacementgroup/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package diskplacementgroup + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DiskPlacementGroup managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DiskPlacementGroup_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DiskPlacementGroup_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DiskPlacementGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_disk_placement_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DiskPlacementGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DiskPlacementGroup + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DiskPlacementGroup{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DiskPlacementGroup") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DiskPlacementGroupList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DiskPlacementGroupList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DiskPlacementGroup_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DiskPlacementGroup{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/diskplacementgroupiambinding/zz_controller.go b/internal/controller/compute/diskplacementgroupiambinding/zz_controller.go new file mode 100755 index 0000000..a4953ca --- /dev/null +++ b/internal/controller/compute/diskplacementgroupiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package diskplacementgroupiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DiskPlacementGroupIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_disk_placement_group_iam_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DiskPlacementGroupIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DiskPlacementGroupIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DiskPlacementGroupIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DiskPlacementGroupIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DiskPlacementGroupIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). 
diff --git a/internal/controller/compute/filesystem/zz_controller.go b/internal/controller/compute/filesystem/zz_controller.go
new file mode 100755
index 0000000..ee186a8
--- /dev/null
+++ b/internal/controller/compute/filesystem/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package filesystem
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Filesystem managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Filesystem_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Filesystem_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Filesystem_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_filesystem"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Filesystem_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.Filesystem
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Filesystem{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Filesystem")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.FilesystemList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.FilesystemList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Filesystem_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.Filesystem{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
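Two connector flavors appear across these files: resources backed by the legacy Terraform plugin SDK (e.g. yandex_compute_filesystem above) are wired through tjcontroller.NewTerraformPluginSDKAsyncConnector, while the *_iam_binding resources use tjcontroller.NewTerraformPluginFrameworkAsyncConnector, reflecting which plugin protocol implements the resource upstream. Either way the connector is keyed by the Terraform resource name looked up in o.Provider.Resources. A small self-contained test sketch of that lookup invariant (the map literal is a stand-in for the real provider configuration):

    package controller_test

    import "testing"

    // TestResourceKeysExist checks that every Terraform resource name the
    // generated controllers reference is present in the provider's resource
    // map. The literal map stands in for o.Provider.Resources.
    func TestResourceKeysExist(t *testing.T) {
    	resources := map[string]struct{}{
    		"yandex_compute_filesystem":             {},
    		"yandex_compute_filesystem_iam_binding": {},
    	}
    	for _, key := range []string{
    		"yandex_compute_filesystem",
    		"yandex_compute_filesystem_iam_binding",
    	} {
    		if _, ok := resources[key]; !ok {
    			t.Errorf("resource %q is not registered", key)
    		}
    	}
    }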
diff --git a/internal/controller/compute/filesystemiambinding/zz_controller.go b/internal/controller/compute/filesystemiambinding/zz_controller.go
new file mode 100755
index 0000000..ec1fa8b
--- /dev/null
+++ b/internal/controller/compute/filesystemiambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package filesystemiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles FilesystemIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.FilesystemIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.FilesystemIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.FilesystemIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_filesystem_iam_binding"],
+				tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FilesystemIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.FilesystemIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.FilesystemIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.FilesystemIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.FilesystemIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.FilesystemIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.FilesystemIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.FilesystemIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/gpucluster/zz_controller.go b/internal/controller/compute/gpucluster/zz_controller.go
new file mode 100755
index 0000000..1886402
--- /dev/null
+++ b/internal/controller/compute/gpucluster/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package gpucluster
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles GpuCluster managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.GpuCluster_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.GpuCluster_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.GpuCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_gpu_cluster"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GpuCluster_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.GpuCluster
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.GpuCluster{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.GpuCluster")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GpuClusterList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GpuClusterList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.GpuCluster_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.GpuCluster{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
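Polling cadence is controlled by two knobs visible above: managed.WithPollInterval(o.PollInterval) sets how often external state is re-read, and a non-zero o.PollJitter adds managed.WithPollJitterHook so that many managed resources of one kind do not all hit the Yandex Cloud API on the same tick. An illustrative fragment (the durations are invented examples, not provider defaults):

    // Illustrative values only; real defaults come from the provider's flags.
    o.PollInterval = 10 * time.Minute // re-read external state this often
    o.PollJitter = time.Minute        // spread polls to avoid synchronized API bursts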
diff --git a/internal/controller/compute/gpuclusteriambinding/zz_controller.go b/internal/controller/compute/gpuclusteriambinding/zz_controller.go
new file mode 100755
index 0000000..071e877
--- /dev/null
+++ b/internal/controller/compute/gpuclusteriambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package gpuclusteriambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles GpuClusterIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.GpuClusterIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.GpuClusterIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.GpuClusterIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_gpu_cluster_iam_binding"],
+				tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GpuClusterIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.GpuClusterIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.GpuClusterIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.GpuClusterIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GpuClusterIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GpuClusterIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.GpuClusterIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.GpuClusterIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/image/zz_controller.go b/internal/controller/compute/image/zz_controller.go
new file mode 100755
index 0000000..12eb511
--- /dev/null
+++ b/internal/controller/compute/image/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package image
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Image managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Image_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Image_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Image_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_image"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Image_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.Image
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Image{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Image")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ImageList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ImageList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Image_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.Image{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
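The async plumbing is also shared: tjcontroller.NewAPICallbacks delivers the results of asynchronous Terraform operations back to the managed resource (with tjcontroller.WithStatusUpdates(false), status writes are left to the reconciler rather than the callback), managed.WithTimeout(3 * time.Minute) bounds each external call, and the finalizer is an operation-tracker-aware wrapper so in-flight async operations are settled before the object is released. The relevant fragment of the opts slice above, with descriptive comments added:

    managed.WithTimeout(3*time.Minute), // upper bound on each external (Terraform) call
    managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(
    	o.OperationTrackerStore, // tracks in-flight async Terraform operations
    	xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),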
diff --git a/internal/controller/compute/imageiambinding/zz_controller.go b/internal/controller/compute/imageiambinding/zz_controller.go
new file mode 100755
index 0000000..653eb30
--- /dev/null
+++ b/internal/controller/compute/imageiambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package imageiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ImageIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ImageIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ImageIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ImageIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_image_iam_binding"],
+				tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ImageIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ImageIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ImageIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ImageIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ImageIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ImageIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ImageIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ImageIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/instance/zz_controller.go b/internal/controller/compute/instance/zz_controller.go
new file mode 100755
index 0000000..a425881
--- /dev/null
+++ b/internal/controller/compute/instance/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package instance
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Instance managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Instance_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Instance_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Instance_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_instance"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Instance_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.Instance
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Instance{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Instance")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.InstanceList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.InstanceList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Instance_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.Instance{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
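When metrics are enabled there are two independent nil-guards: o.MetricOptions gates the per-reconcile MR metrics recorder, and o.MetricOptions.MRStateMetrics additionally gates a statemetrics.NewMRStateRecorder that is handed to mgr.Add, i.e. it runs as a manager Runnable that periodically lists objects (here InstanceList) and exports per-state gauges at o.MetricOptions.PollStateMetricInterval. The pattern, condensed from the code above with added comments:

    // Both levels are optional, so both are nil-checked.
    if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
    	rec := statemetrics.NewMRStateRecorder(
    		mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics,
    		&v1alpha1.InstanceList{}, o.MetricOptions.PollStateMetricInterval,
    	)
    	if err := mgr.Add(rec); err != nil { // runs as a manager Runnable
    		return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.InstanceList")
    	}
    }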
diff --git a/internal/controller/compute/instancegroup/zz_controller.go b/internal/controller/compute/instancegroup/zz_controller.go
new file mode 100755
index 0000000..8e252d8
--- /dev/null
+++ b/internal/controller/compute/instancegroup/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package instancegroup
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles InstanceGroup managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.InstanceGroup_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.InstanceGroup_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.InstanceGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_instance_group"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.InstanceGroup_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.InstanceGroup
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.InstanceGroup{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.InstanceGroup")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.InstanceGroupList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.InstanceGroupList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.InstanceGroup_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.InstanceGroup{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/instanceiambinding/zz_controller.go b/internal/controller/compute/instanceiambinding/zz_controller.go
new file mode 100755
index 0000000..5ff7743
--- /dev/null
+++ b/internal/controller/compute/instanceiambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package instanceiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles InstanceIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.InstanceIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.InstanceIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.InstanceIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_instance_iam_binding"],
+				tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.InstanceIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.InstanceIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.InstanceIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.InstanceIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.InstanceIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.InstanceIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.InstanceIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.InstanceIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
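Connection details always reach Kubernetes secrets through managed.NewAPISecretPublisher; publishing to an external secret store is opt-in and only wired when o.SecretStoreConfigGVK is non-nil. Condensed from the code above, with descriptive comments added:

    // The API secret publisher is unconditional; the external secret store
    // publisher is appended only when a store config GVK is provided.
    cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
    if o.SecretStoreConfigGVK != nil {
    	cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK,
    		connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
    }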
diff --git a/internal/controller/compute/placementgroup/zz_controller.go b/internal/controller/compute/placementgroup/zz_controller.go
new file mode 100755
index 0000000..396e4b3
--- /dev/null
+++ b/internal/controller/compute/placementgroup/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package placementgroup
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles PlacementGroup managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.PlacementGroup_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PlacementGroup_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PlacementGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_placement_group"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PlacementGroup_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.PlacementGroup
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.PlacementGroup{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PlacementGroup")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PlacementGroupList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PlacementGroupList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PlacementGroup_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.PlacementGroup{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/placementgroupiambinding/zz_controller.go b/internal/controller/compute/placementgroupiambinding/zz_controller.go
new file mode 100755
index 0000000..fbff05e
--- /dev/null
+++ b/internal/controller/compute/placementgroupiambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package placementgroupiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles PlacementGroupIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PlacementGroupIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_placement_group_iam_binding"],
+				tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.PlacementGroupIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.PlacementGroupIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PlacementGroupIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PlacementGroupIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PlacementGroupIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.PlacementGroupIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/snapshot/zz_controller.go b/internal/controller/compute/snapshot/zz_controller.go
new file mode 100755
index 0000000..3e6acef
--- /dev/null
+++ b/internal/controller/compute/snapshot/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package snapshot
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Snapshot managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Snapshot_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Snapshot_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Snapshot_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_snapshot"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Snapshot_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.Snapshot
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Snapshot{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Snapshot")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SnapshotList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SnapshotList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Snapshot_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.Snapshot{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
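Every Setup ends identically: the managed.Reconciler is wrapped in ratelimiter.NewReconciler so all controllers share o.GlobalRateLimiter, and xpresource.DesiredStateChanged() filters the watch so status-only updates do not requeue work. The builder tail from the code above, with descriptive comments added:

    return ctrl.NewControllerManagedBy(mgr).
    	Named(name).
    	WithOptions(o.ForControllerRuntime()).
    	WithEventFilter(xpresource.DesiredStateChanged()). // drop status-only churn
    	Watches(&v1alpha1.Snapshot{}, eventHandler).
    	Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) // shared provider-wide rate limit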
diff --git a/internal/controller/compute/snapshotiambinding/zz_controller.go b/internal/controller/compute/snapshotiambinding/zz_controller.go
new file mode 100755
index 0000000..83efcde
--- /dev/null
+++ b/internal/controller/compute/snapshotiambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package snapshotiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles SnapshotIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.SnapshotIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SnapshotIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SnapshotIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_snapshot_iam_binding"],
+				tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SnapshotIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.SnapshotIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.SnapshotIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SnapshotIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SnapshotIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SnapshotIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SnapshotIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.SnapshotIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/compute/snapshotschedule/zz_controller.go b/internal/controller/compute/snapshotschedule/zz_controller.go
new file mode 100755
index 0000000..bfebf7d
--- /dev/null
+++ b/internal/controller/compute/snapshotschedule/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package snapshotschedule
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles SnapshotSchedule managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.SnapshotSchedule_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SnapshotSchedule_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SnapshotSchedule_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_snapshot_schedule"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SnapshotSchedule_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.SnapshotSchedule
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.SnapshotSchedule{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SnapshotSchedule")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SnapshotScheduleList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SnapshotScheduleList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SnapshotSchedule_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.SnapshotSchedule{}, eventHandler).
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/snapshotscheduleiambinding/zz_controller.go b/internal/controller/compute/snapshotscheduleiambinding/zz_controller.go new file mode 100755 index 0000000..bf366ad --- /dev/null +++ b/internal/controller/compute/snapshotscheduleiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package snapshotscheduleiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SnapshotScheduleIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_snapshot_schedule_iam_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if 
o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SnapshotScheduleIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SnapshotScheduleIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SnapshotScheduleIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SnapshotScheduleIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SnapshotScheduleIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SnapshotScheduleIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/container/registry/zz_controller.go b/internal/controller/container/registry/zz_controller.go index deaeba3..835d7ae 100755 --- a/internal/controller/container/registry/zz_controller.go +++ b/internal/controller/container/registry/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package registry @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Registry managed resources. 
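Note on the generated Setup pattern: every controller added or touched in this diff exposes the same Setup(mgr, o) entry point — it builds connection publishers, an event handler, and an async Terraform connector for one resource type, then registers a managed.Reconciler behind the global rate limiter. Below is a minimal sketch of how these per-resource Setup functions are typically aggregated, assuming the provider follows upjet's usual generated zz_setup.go convention; the import aliases and the two-entry controller list are illustrative, not taken from this PR (only the two package paths shown are ones that actually appear in this diff).

package controller

import (
	ctrl "sigs.k8s.io/controller-runtime"

	tjcontroller "github.com/crossplane/upjet/pkg/controller"

	// Controller packages taken from this diff; the full generated
	// list would contain one entry per resource.
	snapshotschedule "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotschedule"
	registry "github.com/tagesjump/provider-upjet-yc/internal/controller/container/registry"
)

// Setup calls each generated controller's Setup with the shared
// options and stops at the first registration error.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	for _, setup := range []func(ctrl.Manager, tjcontroller.Options) error{
		snapshotschedule.Setup,
		registry.Setup,
		// ...one entry per generated resource controller...
	} {
		if err := setup(mgr, o); err != nil {
			return err
		}
	}
	return nil
}

Because each generated Setup is self-contained — it derives its controller name, event handler, and metric recorder from the resource's GroupVersionKind — enabling or disabling a resource is a one-line change in this slice, and the shared tjcontroller.Options carries the poll interval, jitter, feature flags, and metric options that every controller in this diff consumes identically.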
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Registry_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_registry"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Registry_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_registry"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Registry_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/container/registryiambinding/zz_controller.go b/internal/controller/container/registryiambinding/zz_controller.go index b1450f9..5c87006 100755 --- a/internal/controller/container/registryiambinding/zz_controller.go +++ b/internal/controller/container/registryiambinding/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package registryiambinding @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles RegistryIAMBinding managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.RegistryIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_registry_iam_binding"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RegistryIAMBinding_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_registry_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RegistryIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/container/registryippermission/zz_controller.go b/internal/controller/container/registryippermission/zz_controller.go index 1d6b4fc..e2f103f 100755 --- a/internal/controller/container/registryippermission/zz_controller.go +++ 
b/internal/controller/container/registryippermission/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package registryippermission @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles RegistryIPPermission managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.RegistryIPPermission_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_registry_ip_permission"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RegistryIPPermission_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_registry_ip_permission"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RegistryIPPermission_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/container/repository/zz_controller.go b/internal/controller/container/repository/zz_controller.go index 
5683752..dff8d82 100755 --- a/internal/controller/container/repository/zz_controller.go +++ b/internal/controller/container/repository/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package repository @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" features "github.com/tagesjump/provider-upjet-yc/internal/features" -v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" - ) // Setup adds a controller that reconciles Repository managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Repository_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_repository"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Repository_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_repository"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Repository_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/container/repositoryiambinding/zz_controller.go 
b/internal/controller/container/repositoryiambinding/zz_controller.go index 94b010b..cafe40f 100755 --- a/internal/controller/container/repositoryiambinding/zz_controller.go +++ b/internal/controller/container/repositoryiambinding/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package repositoryiambinding @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles RepositoryIAMBinding managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.RepositoryIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_repository_iam_binding"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RepositoryIAMBinding_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_repository_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RepositoryIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = 
append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/container/repositorylifecyclepolicy/zz_controller.go b/internal/controller/container/repositorylifecyclepolicy/zz_controller.go index 46fdacc..7459293 100755 --- a/internal/controller/container/repositorylifecyclepolicy/zz_controller.go +++ b/internal/controller/container/repositorylifecyclepolicy/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package repositorylifecyclepolicy @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles RepositoryLifecyclePolicy managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.RepositoryLifecyclePolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_repository_lifecycle_policy"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RepositoryLifecyclePolicy_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_container_repository_lifecycle_policy"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RepositoryLifecyclePolicy_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, 
managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/dataproc/cluster/zz_controller.go b/internal/controller/dataproc/cluster/zz_controller.go index 20024f2..7ca14a4 100755 --- a/internal/controller/dataproc/cluster/zz_controller.go +++ b/internal/controller/dataproc/cluster/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package cluster @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/dataproc/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Cluster managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dataproc_cluster"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Cluster_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dataproc_cluster"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Cluster_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, 
managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/datatransfer/endpoint/zz_controller.go b/internal/controller/datatransfer/endpoint/zz_controller.go index 72e5d29..18e9141 100755 --- a/internal/controller/datatransfer/endpoint/zz_controller.go +++ b/internal/controller/datatransfer/endpoint/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package endpoint @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Endpoint managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Endpoint_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_datatransfer_endpoint"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Endpoint_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_datatransfer_endpoint"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Endpoint_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = 
append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/datatransfer/transfer/zz_controller.go b/internal/controller/datatransfer/transfer/zz_controller.go index 350ddb1..39a91b9 100755 --- a/internal/controller/datatransfer/transfer/zz_controller.go +++ b/internal/controller/datatransfer/transfer/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package transfer @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Transfer managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Transfer_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_datatransfer_transfer"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Transfer_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_datatransfer_transfer"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Transfer_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts 
= append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/dns/recordset/zz_controller.go b/internal/controller/dns/recordset/zz_controller.go index 94f1299..3eacd8b 100755 --- a/internal/controller/dns/recordset/zz_controller.go +++ b/internal/controller/dns/recordset/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package recordset @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Recordset managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Recordset_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dns_recordset"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Recordset_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dns_recordset"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Recordset_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, 
managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/dns/zone/zz_controller.go b/internal/controller/dns/zone/zz_controller.go index a664101..cb7081f 100755 --- a/internal/controller/dns/zone/zz_controller.go +++ b/internal/controller/dns/zone/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package zone @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Zone managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Zone_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dns_zone"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Zone_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dns_zone"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Zone_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, 
managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/dns/zoneiambinding/zz_controller.go b/internal/controller/dns/zoneiambinding/zz_controller.go index ac452e1..093d38d 100755 --- a/internal/controller/dns/zoneiambinding/zz_controller.go +++ b/internal/controller/dns/zoneiambinding/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package zoneiambinding @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles ZoneIAMBinding managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ZoneIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dns_zone_iam_binding"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ZoneIAMBinding_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dns_zone_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ZoneIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = 
diff --git a/internal/controller/function/iambinding/zz_controller.go b/internal/controller/function/iambinding/zz_controller.go
index c96e252..283fc8c 100755
--- a/internal/controller/function/iambinding/zz_controller.go
+++ b/internal/controller/function/iambinding/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package iambinding
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles IAMBinding managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.IAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_iam_binding"],
-				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.IAMBinding_GroupVersionKind, mgr, o.PollInterval)),
-				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_iam_binding"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.IAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/function/scalingpolicy/zz_controller.go b/internal/controller/function/scalingpolicy/zz_controller.go
index a13e28f..1b7f00f 100755
--- a/internal/controller/function/scalingpolicy/zz_controller.go
+++ b/internal/controller/function/scalingpolicy/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package scalingpolicy
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles ScalingPolicy managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ScalingPolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_scaling_policy"],
-				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ScalingPolicy_GroupVersionKind, mgr, o.PollInterval)),
-				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_scaling_policy"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ScalingPolicy_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/function/trigger/zz_controller.go b/internal/controller/function/trigger/zz_controller.go
index 9f1c3fe..032ed30 100755
--- a/internal/controller/function/trigger/zz_controller.go
+++ b/internal/controller/function/trigger/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package trigger
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles Trigger managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Trigger_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_trigger"],
-				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Trigger_GroupVersionKind, mgr, o.PollInterval)),
-				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_trigger"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Trigger_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/iam/serviceaccount/zz_controller.go b/internal/controller/iam/serviceaccount/zz_controller.go
new file mode 100755
index 0000000..8a80b88
--- /dev/null
+++ b/internal/controller/iam/serviceaccount/zz_controller.go
@@ -0,0 +1,88 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccount
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccount managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccount_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccount_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccount_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccount_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccount
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccount{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccount")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccount_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccount{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
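Every generated controller package in this change exposes the same entry point, `Setup(mgr ctrl.Manager, o tjcontroller.Options) error`, so the provider binary can register all of them uniformly. A minimal sketch of that fan-out wiring, with a hypothetical aggregator name and just two of the packages added here (upjet providers normally generate this aggregation themselves; the real file may differ):

package controller

import (
	ctrl "sigs.k8s.io/controller-runtime"

	tjcontroller "github.com/crossplane/upjet/pkg/controller"

	"github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccount"
	"github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountkey"
)

// SetupIAM is a hypothetical aggregator: it registers each generated
// controller with the manager and stops at the first failure.
func SetupIAM(mgr ctrl.Manager, o tjcontroller.Options) error {
	for _, setup := range []func(ctrl.Manager, tjcontroller.Options) error{
		serviceaccount.Setup,
		serviceaccountkey.Setup,
	} {
		if err := setup(mgr, o); err != nil {
			return err
		}
	}
	return nil
}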
diff --git a/internal/controller/iam/serviceaccountapikey/zz_controller.go b/internal/controller/iam/serviceaccountapikey/zz_controller.go
new file mode 100755
index 0000000..8395941
--- /dev/null
+++ b/internal/controller/iam/serviceaccountapikey/zz_controller.go
@@ -0,0 +1,88 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccountapikey
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccountAPIKey managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccountAPIKey_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccountAPIKey_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountAPIKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account_api_key"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccountAPIKey_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccountAPIKey
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccountAPIKey{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccountAPIKey")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountAPIKeyList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountAPIKeyList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountAPIKey_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccountAPIKey{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/iam/serviceaccountiambinding/zz_controller.go b/internal/controller/iam/serviceaccountiambinding/zz_controller.go
new file mode 100755
index 0000000..ca55ff5
--- /dev/null
+++ b/internal/controller/iam/serviceaccountiambinding/zz_controller.go
@@ -0,0 +1,88 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccountiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccountIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccountIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccountIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account_iam_binding"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccountIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccountIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccountIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccountIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccountIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/iam/serviceaccountiammember/zz_controller.go b/internal/controller/iam/serviceaccountiammember/zz_controller.go
new file mode 100755
index 0000000..5d689bf
--- /dev/null
+++ b/internal/controller/iam/serviceaccountiammember/zz_controller.go
@@ -0,0 +1,88 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccountiammember
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccountIAMMember managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccountIAMMember_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccountIAMMember_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountIAMMember_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account_iam_member"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccountIAMMember_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccountIAMMember
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccountIAMMember{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccountIAMMember")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountIAMMemberList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountIAMMemberList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountIAMMember_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccountIAMMember{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/iam/serviceaccountiampolicy/zz_controller.go b/internal/controller/iam/serviceaccountiampolicy/zz_controller.go
new file mode 100755
index 0000000..22d1083
--- /dev/null
+++ b/internal/controller/iam/serviceaccountiampolicy/zz_controller.go
@@ -0,0 +1,88 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccountiampolicy
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccountIAMPolicy managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccountIAMPolicy_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccountIAMPolicy_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountIAMPolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account_iam_policy"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccountIAMPolicy_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccountIAMPolicy
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccountIAMPolicy{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccountIAMPolicy")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountIAMPolicyList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountIAMPolicyList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountIAMPolicy_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccountIAMPolicy{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/iam/serviceaccountkey/zz_controller.go b/internal/controller/iam/serviceaccountkey/zz_controller.go
new file mode 100755
index 0000000..8a03d13
--- /dev/null
+++ b/internal/controller/iam/serviceaccountkey/zz_controller.go
@@ -0,0 +1,88 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccountkey
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccountKey managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccountKey_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccountKey_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account_key"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccountKey_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccountKey
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccountKey{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccountKey")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountKeyList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountKeyList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountKey_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccountKey{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
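The IAM controllers above all prepend `managed.NewNameAsExternalName(mgr.GetClient())` to their initializer chain, so a managed resource's Kubernetes object name is adopted as its Terraform external name before the first reconcile; the ServiceAccountStaticAccessKey controller that follows (and the KMS key controllers after it) leave the chain empty, presumably because those external IDs are assigned by the cloud rather than chosen by the user. Custom steps can be added to the same chain by implementing crossplane-runtime's `managed.Initializer` interface; a minimal sketch with a hypothetical label (an illustration, not code from this provider):

package initdemo

import (
	"context"

	"github.com/crossplane/crossplane-runtime/pkg/resource"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// defaultLabeler is a hypothetical managed.Initializer: it runs before
// reconciliation starts and persists a label on the managed resource
// if one is missing.
type defaultLabeler struct {
	kube client.Client
}

func (d defaultLabeler) Initialize(ctx context.Context, mg resource.Managed) error {
	labels := mg.GetLabels()
	if labels["example.org/managed-by"] != "" {
		return nil
	}
	if labels == nil {
		labels = map[string]string{}
	}
	labels["example.org/managed-by"] = "provider-upjet-yc"
	mg.SetLabels(labels)
	return d.kube.Update(ctx, mg)
}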
diff --git a/internal/controller/iam/serviceaccountstaticaccesskey/zz_controller.go b/internal/controller/iam/serviceaccountstaticaccesskey/zz_controller.go
new file mode 100755
index 0000000..5d002ce
--- /dev/null
+++ b/internal/controller/iam/serviceaccountstaticaccesskey/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package serviceaccountstaticaccesskey
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles ServiceAccountStaticAccessKey managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.ServiceAccountStaticAccessKey_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ServiceAccountStaticAccessKey_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountStaticAccessKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iam_service_account_static_access_key"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ServiceAccountStaticAccessKey_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.ServiceAccountStaticAccessKey
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.ServiceAccountStaticAccessKey{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ServiceAccountStaticAccessKey")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ServiceAccountStaticAccessKeyList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ServiceAccountStaticAccessKeyList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ServiceAccountStaticAccessKey_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.ServiceAccountStaticAccessKey{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/iot/corebroker/zz_controller.go b/internal/controller/iot/corebroker/zz_controller.go
index 0c69168..6c1c8ee 100755
--- a/internal/controller/iot/corebroker/zz_controller.go
+++ b/internal/controller/iot/corebroker/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package corebroker
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles CoreBroker managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CoreBroker_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_broker"],
-				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreBroker_GroupVersionKind, mgr, o.PollInterval)),
-				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_broker"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreBroker_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/iot/coredevice/zz_controller.go b/internal/controller/iot/coredevice/zz_controller.go
index 54ef6ca..b0831c7 100755
--- a/internal/controller/iot/coredevice/zz_controller.go
+++ b/internal/controller/iot/coredevice/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package coredevice
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles CoreDevice managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CoreDevice_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_device"],
-				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreDevice_GroupVersionKind, mgr, o.PollInterval)),
-				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_device"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreDevice_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/iot/coreregistry/zz_controller.go b/internal/controller/iot/coreregistry/zz_controller.go
index c479fbd..c232a6a 100755
--- a/internal/controller/iot/coreregistry/zz_controller.go
+++ b/internal/controller/iot/coreregistry/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package coreregistry
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles CoreRegistry managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CoreRegistry_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_registry"],
-				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreRegistry_GroupVersionKind, mgr, o.PollInterval)),
-				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_registry"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreRegistry_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/kms/asymmetricencryptionkey/zz_controller.go b/internal/controller/kms/asymmetricencryptionkey/zz_controller.go
new file mode 100755
index 0000000..b829957
--- /dev/null
+++ b/internal/controller/kms/asymmetricencryptionkey/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package asymmetricencryptionkey
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles AsymmetricEncryptionKey managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.AsymmetricEncryptionKey_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.AsymmetricEncryptionKey_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricEncryptionKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_asymmetric_encryption_key"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.AsymmetricEncryptionKey_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.AsymmetricEncryptionKey
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.AsymmetricEncryptionKey{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.AsymmetricEncryptionKey")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.AsymmetricEncryptionKeyList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.AsymmetricEncryptionKeyList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricEncryptionKey_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.AsymmetricEncryptionKey{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/kms/asymmetricencryptionkeyiambinding/zz_controller.go b/internal/controller/kms/asymmetricencryptionkeyiambinding/zz_controller.go
new file mode 100755
index 0000000..60e884f
--- /dev/null
+++ b/internal/controller/kms/asymmetricencryptionkeyiambinding/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package asymmetricencryptionkeyiambinding
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles AsymmetricEncryptionKeyIAMBinding managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.AsymmetricEncryptionKeyIAMBinding_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.AsymmetricEncryptionKeyIAMBinding_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricEncryptionKeyIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_asymmetric_encryption_key_iam_binding"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.AsymmetricEncryptionKeyIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.AsymmetricEncryptionKeyIAMBinding
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.AsymmetricEncryptionKeyIAMBinding{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.AsymmetricEncryptionKeyIAMBinding")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.AsymmetricEncryptionKeyIAMBindingList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.AsymmetricEncryptionKeyIAMBindingList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricEncryptionKeyIAMBinding_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.AsymmetricEncryptionKeyIAMBinding{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
+ Watches(&v1alpha1.AsymmetricEncryptionKeyIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kms/asymmetricsignaturekey/zz_controller.go b/internal/controller/kms/asymmetricsignaturekey/zz_controller.go new file mode 100755 index 0000000..1ddd8ee --- /dev/null +++ b/internal/controller/kms/asymmetricsignaturekey/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package asymmetricsignaturekey + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles AsymmetricSignatureKey managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.AsymmetricSignatureKey_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.AsymmetricSignatureKey_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricSignatureKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_asymmetric_signature_key"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.AsymmetricSignatureKey_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = 
append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.AsymmetricSignatureKey + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.AsymmetricSignatureKey{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.AsymmetricSignatureKey") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.AsymmetricSignatureKeyList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.AsymmetricSignatureKeyList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricSignatureKey_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.AsymmetricSignatureKey{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kms/asymmetricsignaturekeyiambinding/zz_controller.go b/internal/controller/kms/asymmetricsignaturekeyiambinding/zz_controller.go new file mode 100755 index 0000000..9391da0 --- /dev/null +++ b/internal/controller/kms/asymmetricsignaturekeyiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package asymmetricsignaturekeyiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles AsymmetricSignatureKeyIAMBinding managed resources. 
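+// A minimal sketch of a call site, assuming an aggregator that receives the
+// same manager and options (the helper name is illustrative, not part of
+// this generated file):
+//
+//	// inside a hypothetical SetupControllers(mgr, o) helper:
+//	if err := Setup(mgr, o); err != nil {
+//		return errors.Wrap(err, "cannot set up AsymmetricSignatureKeyIAMBinding controller")
+//	}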
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.AsymmetricSignatureKeyIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.AsymmetricSignatureKeyIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricSignatureKeyIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_asymmetric_signature_key_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.AsymmetricSignatureKeyIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.AsymmetricSignatureKeyIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.AsymmetricSignatureKeyIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.AsymmetricSignatureKeyIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.AsymmetricSignatureKeyIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.AsymmetricSignatureKeyIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.AsymmetricSignatureKeyIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). 
+ Watches(&v1alpha1.AsymmetricSignatureKeyIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kms/secretciphertext/zz_controller.go b/internal/controller/kms/secretciphertext/zz_controller.go new file mode 100755 index 0000000..7b77e3f --- /dev/null +++ b/internal/controller/kms/secretciphertext/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package secretciphertext + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SecretCiphertext managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SecretCiphertext_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SecretCiphertext_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SecretCiphertext_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_secret_ciphertext"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecretCiphertext_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if 
o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SecretCiphertext + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SecretCiphertext{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SecretCiphertext") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SecretCiphertextList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SecretCiphertextList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SecretCiphertext_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SecretCiphertext{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kms/symmetrickey/zz_controller.go b/internal/controller/kms/symmetrickey/zz_controller.go new file mode 100755 index 0000000..06a2f93 --- /dev/null +++ b/internal/controller/kms/symmetrickey/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package symmetrickey + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SymmetricKey managed resources. 
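+// Knobs visible below: API callbacks are created with status updates disabled
+// (tjcontroller.WithStatusUpdates(false)), the external client times out
+// after three minutes, and the poll interval (plus optional jitter) comes
+// from the shared Options value.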
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SymmetricKey_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SymmetricKey_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SymmetricKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_symmetric_key"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SymmetricKey_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SymmetricKey + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SymmetricKey{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SymmetricKey") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SymmetricKeyList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SymmetricKeyList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SymmetricKey_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SymmetricKey{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kms/symmetrickeyiambinding/zz_controller.go b/internal/controller/kms/symmetrickeyiambinding/zz_controller.go new file mode 100755 index 0000000..39de51f --- /dev/null +++ b/internal/controller/kms/symmetrickeyiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package symmetrickeyiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SymmetricKeyIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SymmetricKeyIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SymmetricKeyIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SymmetricKeyIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kms_symmetric_key_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SymmetricKeyIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != 
nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SymmetricKeyIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SymmetricKeyIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SymmetricKeyIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SymmetricKeyIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SymmetricKeyIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SymmetricKeyIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SymmetricKeyIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kubernetes/cluster/zz_controller.go b/internal/controller/kubernetes/cluster/zz_controller.go new file mode 100755 index 0000000..2b2be66 --- /dev/null +++ b/internal/controller/kubernetes/cluster/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package cluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kubernetes/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Cluster managed resources. 
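+// Everything past the base options is feature-gated: beta management
+// policies, MR metrics, the MR state metrics recorder, and webhook
+// registration for the kind are each wired only when the corresponding
+// option is set.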
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Cluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Cluster_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kubernetes_cluster"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Cluster_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Cluster + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Cluster{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Cluster") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ClusterList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ClusterList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Cluster{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kubernetes/nodegroup/zz_controller.go b/internal/controller/kubernetes/nodegroup/zz_controller.go new file mode 100755 index 0000000..d6140f9 --- /dev/null +++ b/internal/controller/kubernetes/nodegroup/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package nodegroup + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/kubernetes/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles NodeGroup managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.NodeGroup_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.NodeGroup_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.NodeGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_kubernetes_node_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.NodeGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // 
register webhooks for the kind v1alpha1.NodeGroup + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.NodeGroup{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.NodeGroup") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.NodeGroupList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.NodeGroupList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.NodeGroup_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.NodeGroup{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/lb/networkloadbalancer/zz_controller.go b/internal/controller/lb/networkloadbalancer/zz_controller.go index fa3fbc6..64c4423 100755 --- a/internal/controller/lb/networkloadbalancer/zz_controller.go +++ b/internal/controller/lb/networkloadbalancer/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package networkloadbalancer @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/lb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles NetworkLoadBalancer managed resources. 
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.NetworkLoadBalancer_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lb_network_load_balancer"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.NetworkLoadBalancer_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lb_network_load_balancer"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.NetworkLoadBalancer_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/lb/targetgroup/zz_controller.go b/internal/controller/lb/targetgroup/zz_controller.go index 9e927bf..eac2f91 100755 --- a/internal/controller/lb/targetgroup/zz_controller.go +++ b/internal/controller/lb/targetgroup/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package targetgroup @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/lb/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles TargetGroup managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TargetGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lb_target_group"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TargetGroup_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lb_target_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TargetGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/loadtesting/agent/zz_controller.go b/internal/controller/loadtesting/agent/zz_controller.go index 67ccade..7c420aa 100755 --- a/internal/controller/loadtesting/agent/zz_controller.go +++ b/internal/controller/loadtesting/agent/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package agent @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/loadtesting/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Agent managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Agent_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_loadtesting_agent"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Agent_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_loadtesting_agent"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Agent_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/lockbox/secret/zz_controller.go b/internal/controller/lockbox/secret/zz_controller.go index 6cde5eb..26873f1 100755 --- a/internal/controller/lockbox/secret/zz_controller.go +++ b/internal/controller/lockbox/secret/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
package secret @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Secret managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Secret_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lockbox_secret"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Secret_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lockbox_secret"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Secret_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/lockbox/secretiambinding/zz_controller.go b/internal/controller/lockbox/secretiambinding/zz_controller.go index cf64bcd..86ce971 100755 --- a/internal/controller/lockbox/secretiambinding/zz_controller.go +++ b/internal/controller/lockbox/secretiambinding/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. 
 package secretiambinding
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles SecretIAMBinding managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SecretIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lockbox_secret_iam_binding"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecretIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lockbox_secret_iam_binding"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecretIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/lockbox/secretversion/zz_controller.go b/internal/controller/lockbox/secretversion/zz_controller.go
index 1297c98..1de22f1 100755
--- a/internal/controller/lockbox/secretversion/zz_controller.go
+++ b/internal/controller/lockbox/secretversion/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package secretversion
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/lockbox/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles SecretVersion managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SecretVersion_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lockbox_secret_version"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecretVersion_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_lockbox_secret_version"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecretVersion_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/logging/group/zz_controller.go b/internal/controller/logging/group/zz_controller.go
new file mode 100755
index 0000000..8f59891
--- /dev/null
+++ b/internal/controller/logging/group/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package group
+
+import (
+    "time"
+
+    "github.com/crossplane/crossplane-runtime/pkg/connection"
+    "github.com/crossplane/crossplane-runtime/pkg/event"
+    "github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+    "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+    xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+    "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+    tjcontroller "github.com/crossplane/upjet/pkg/controller"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
+    "github.com/pkg/errors"
+    ctrl "sigs.k8s.io/controller-runtime"
+
+    v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/logging/v1alpha1"
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Group managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+    name := managed.ControllerName(v1alpha1.Group_GroupVersionKind.String())
+    var initializers managed.InitializerChain
+    cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+    if o.SecretStoreConfigGVK != nil {
+        cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+    }
+    eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Group_GroupVersionKind)))
+    ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Group_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+    opts := []managed.ReconcilerOption{
+        managed.WithExternalConnecter(
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_logging_group"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Group_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+        managed.WithLogger(o.Logger.WithValues("controller", name)),
+        managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+        managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+        managed.WithTimeout(3 * time.Minute),
+        managed.WithInitializers(initializers),
+        managed.WithConnectionPublishers(cps...),
+        managed.WithPollInterval(o.PollInterval),
+    }
+    if o.PollJitter != 0 {
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+    }
+    if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+        opts = append(opts, managed.WithManagementPolicies())
+    }
+    if o.MetricOptions != nil {
+        opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+    }
+
+    // register webhooks for the kind v1alpha1.Group
+    // if they're enabled.
+    if o.StartWebhooks {
+        if err := ctrl.NewWebhookManagedBy(mgr).
+            For(&v1alpha1.Group{}).
+            Complete(); err != nil {
+            return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Group")
+        }
+    }
+
+    if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+        stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+            mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GroupList{}, o.MetricOptions.PollStateMetricInterval,
+        )
+        if err := mgr.Add(stateMetricsRecorder); err != nil {
+            return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GroupList")
+        }
+    }
+
+    r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Group_GroupVersionKind), opts...)
+
+    return ctrl.NewControllerManagedBy(mgr).
+        Named(name).
+        WithOptions(o.ForControllerRuntime()).
+        WithEventFilter(xpresource.DesiredStateChanged()).
+        Watches(&v1alpha1.Group{}, eventHandler).
+        Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
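Editor's note: a Setup function like the one above only takes effect once the provider registers it with its controller manager. Below is a minimal sketch of the usual upjet wiring, assuming the aggregation lives in a generated registration file (commonly internal/controller/zz_setup.go); that file is not part of this diff, so the package layout shown is illustrative.

package controller

import (
    ctrl "sigs.k8s.io/controller-runtime"

    tjcontroller "github.com/crossplane/upjet/pkg/controller"

    group "github.com/tagesjump/provider-upjet-yc/internal/controller/logging/group"
    secretversion "github.com/tagesjump/provider-upjet-yc/internal/controller/lockbox/secretversion"
)

// Setup calls every generated Setup function with the shared manager and
// options; the first failure aborts provider startup.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
    for _, setup := range []func(ctrl.Manager, tjcontroller.Options) error{
        group.Setup,
        secretversion.Setup,
    } {
        if err := setup(mgr, o); err != nil {
            return err
        }
    }
    return nil
}

All controllers therefore share one Options value, which is why the per-kind differences in this diff reduce to the resource key, the GroupVersionKind, and the connector constructor.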
diff --git a/internal/controller/mdb/clickhousecluster/zz_controller.go b/internal/controller/mdb/clickhousecluster/zz_controller.go
index 9ef5922..84f6ca8 100755
--- a/internal/controller/mdb/clickhousecluster/zz_controller.go
+++ b/internal/controller/mdb/clickhousecluster/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package clickhousecluster
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
    xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles ClickhouseCluster managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ClickhouseCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_clickhouse_cluster"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ClickhouseCluster_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_clickhouse_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ClickhouseCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/elasticsearchcluster/zz_controller.go b/internal/controller/mdb/elasticsearchcluster/zz_controller.go
index 348c75c..7b156cd 100755
--- a/internal/controller/mdb/elasticsearchcluster/zz_controller.go
+++ b/internal/controller/mdb/elasticsearchcluster/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package elasticsearchcluster
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles ElasticsearchCluster managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ElasticsearchCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_elasticsearch_cluster"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ElasticsearchCluster_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_elasticsearch_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ElasticsearchCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/greenplumcluster/zz_controller.go b/internal/controller/mdb/greenplumcluster/zz_controller.go
index bdc11d5..247ac8f 100755
--- a/internal/controller/mdb/greenplumcluster/zz_controller.go
+++ b/internal/controller/mdb/greenplumcluster/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package greenplumcluster
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles GreenplumCluster managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.GreenplumCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_greenplum_cluster"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GreenplumCluster_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_greenplum_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GreenplumCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/kafkacluster/zz_controller.go b/internal/controller/mdb/kafkacluster/zz_controller.go
index 75cdf7b..3b7aa42 100755
--- a/internal/controller/mdb/kafkacluster/zz_controller.go
+++ b/internal/controller/mdb/kafkacluster/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package kafkacluster
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles KafkaCluster managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.KafkaCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_cluster"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaCluster_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/kafkaconnector/zz_controller.go b/internal/controller/mdb/kafkaconnector/zz_controller.go
index 2395cda..46610f1 100755
--- a/internal/controller/mdb/kafkaconnector/zz_controller.go
+++ b/internal/controller/mdb/kafkaconnector/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package kafkaconnector
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles KafkaConnector managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.KafkaConnector_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_connector"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaConnector_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_connector"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaConnector_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/kafkatopic/zz_controller.go b/internal/controller/mdb/kafkatopic/zz_controller.go
index 541c761..855660b 100755
--- a/internal/controller/mdb/kafkatopic/zz_controller.go
+++ b/internal/controller/mdb/kafkatopic/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package kafkatopic
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles KafkaTopic managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.KafkaTopic_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_topic"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaTopic_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_topic"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaTopic_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/kafkauser/zz_controller.go b/internal/controller/mdb/kafkauser/zz_controller.go
index d32c92f..97ac7d4 100755
--- a/internal/controller/mdb/kafkauser/zz_controller.go
+++ b/internal/controller/mdb/kafkauser/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package kafkauser
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles KafkaUser managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.KafkaUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_user"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaUser_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_kafka_user"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.KafkaUser_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/mongodbcluster/zz_controller.go b/internal/controller/mdb/mongodbcluster/zz_controller.go
index c351927..df44028 100755
--- a/internal/controller/mdb/mongodbcluster/zz_controller.go
+++ b/internal/controller/mdb/mongodbcluster/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package mongodbcluster
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles MongodbCluster managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_cluster"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbCluster_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/mongodbdatabase/zz_controller.go b/internal/controller/mdb/mongodbdatabase/zz_controller.go
index 83bfed4..c352a0b 100755
--- a/internal/controller/mdb/mongodbdatabase/zz_controller.go
+++ b/internal/controller/mdb/mongodbdatabase/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package mongodbdatabase
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles MongodbDatabase managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_database"],
-                tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbDatabase_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_database"],
+                tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbDatabase_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/mongodbuser/zz_controller.go b/internal/controller/mdb/mongodbuser/zz_controller.go
index 05db4af..2ae0f2b 100755
--- a/internal/controller/mdb/mongodbuser/zz_controller.go
+++ b/internal/controller/mdb/mongodbuser/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package mongodbuser
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles MongodbUser managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_user"],
-                tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbUser_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_user"],
+                tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbUser_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
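Editor's note: two connector families appear in this diff. Most controllers build a NewTerraformPluginSDKAsyncConnector, while the MongodbDatabase and MongodbUser controllers build a NewTerraformPluginFrameworkAsyncConnector; the choice tracks whether the underlying Terraform resource is implemented with terraform-plugin-sdk/v2 or terraform-plugin-framework. The hypothetical helper below contrasts the two calls in one place, with only the logger option shown for brevity; the generated code instead inlines the full option list per controller.

package example

import (
    "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
    tjcontroller "github.com/crossplane/upjet/pkg/controller"
    ctrl "sigs.k8s.io/controller-runtime"
)

// externalConnecterFor is a hypothetical helper (not part of this diff): it
// returns the reconciler option for a resource depending on which Terraform
// plugin protocol serves it.
func externalConnecterFor(mgr ctrl.Manager, o tjcontroller.Options, resource string, framework bool) managed.ReconcilerOption {
    if framework {
        return managed.WithExternalConnecter(
            tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources[resource],
                tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger)))
    }
    return managed.WithExternalConnecter(
        tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources[resource],
            tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger)))
}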
diff --git a/internal/controller/mdb/mysqlcluster/zz_controller.go b/internal/controller/mdb/mysqlcluster/zz_controller.go
index 343fbbb..f2d8385 100755
--- a/internal/controller/mdb/mysqlcluster/zz_controller.go
+++ b/internal/controller/mdb/mysqlcluster/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package mysqlcluster
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles MySQLCluster managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MySQLCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mysql_cluster"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MySQLCluster_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mysql_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MySQLCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/mysqldatabase/zz_controller.go b/internal/controller/mdb/mysqldatabase/zz_controller.go
index dba924d..2334e87 100755
--- a/internal/controller/mdb/mysqldatabase/zz_controller.go
+++ b/internal/controller/mdb/mysqldatabase/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package mysqldatabase
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles MySQLDatabase managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MySQLDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mysql_database"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MySQLDatabase_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mysql_database"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MySQLDatabase_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/mysqluser/zz_controller.go b/internal/controller/mdb/mysqluser/zz_controller.go
index c5df6b7..db8e85f 100755
--- a/internal/controller/mdb/mysqluser/zz_controller.go
+++ b/internal/controller/mdb/mysqluser/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package mysqluser
@@ -13,15 +11,14 @@ import (
     "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
     xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
     "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-    "github.com/crossplane/upjet/pkg/controller/handler"
     tjcontroller "github.com/crossplane/upjet/pkg/controller"
-    "github.com/crossplane/upjet/pkg/terraform"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
     "github.com/pkg/errors"
     ctrl "sigs.k8s.io/controller-runtime"
 
     v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles MySQLUser managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
     ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MySQLUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
     opts := []managed.ReconcilerOption{
         managed.WithExternalConnecter(
-            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mysql_user"],
-                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MySQLUser_GroupVersionKind, mgr, o.PollInterval)),
-                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mysql_user"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MySQLUser_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
         managed.WithLogger(o.Logger.WithValues("controller", name)),
         managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
         managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-        managed.WithTimeout(3*time.Minute),
+        managed.WithTimeout(3 * time.Minute),
         managed.WithInitializers(initializers),
         managed.WithConnectionPublishers(cps...),
         managed.WithPollInterval(o.PollInterval),
     }
     if o.PollJitter != 0 {
-        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
     }
     if o.Features.Enabled(features.EnableBetaManagementPolicies) {
         opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/mdb/postgresqlcluster/zz_controller.go b/internal/controller/mdb/postgresqlcluster/zz_controller.go
new file mode 100755
index 0000000..d9fadb1
--- /dev/null
+++ b/internal/controller/mdb/postgresqlcluster/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package postgresqlcluster
+
+import (
+    "time"
+
+    "github.com/crossplane/crossplane-runtime/pkg/connection"
+    "github.com/crossplane/crossplane-runtime/pkg/event"
+    "github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+    "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+    xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+    "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+    tjcontroller "github.com/crossplane/upjet/pkg/controller"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
+    "github.com/pkg/errors"
+    ctrl "sigs.k8s.io/controller-runtime"
+
+    v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles PostgresqlCluster managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+    name := managed.ControllerName(v1alpha1.PostgresqlCluster_GroupVersionKind.String())
+    var initializers managed.InitializerChain
+    cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+    if o.SecretStoreConfigGVK != nil {
+        cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+    }
+    eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PostgresqlCluster_GroupVersionKind)))
+    ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PostgresqlCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+    opts := []managed.ReconcilerOption{
+        managed.WithExternalConnecter(
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_postgresql_cluster"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PostgresqlCluster_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+        managed.WithLogger(o.Logger.WithValues("controller", name)),
+        managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+        managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+        managed.WithTimeout(3 * time.Minute),
+        managed.WithInitializers(initializers),
+        managed.WithConnectionPublishers(cps...),
+        managed.WithPollInterval(o.PollInterval),
+    }
+    if o.PollJitter != 0 {
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+    }
+    if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+        opts = append(opts, managed.WithManagementPolicies())
+    }
+    if o.MetricOptions != nil {
+        opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+    }
+
+    // register webhooks for the kind v1alpha1.PostgresqlCluster
+    // if they're enabled.
+    if o.StartWebhooks {
+        if err := ctrl.NewWebhookManagedBy(mgr).
+            For(&v1alpha1.PostgresqlCluster{}).
+            Complete(); err != nil {
+            return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PostgresqlCluster")
+        }
+    }
+
+    if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+        stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+            mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PostgresqlClusterList{}, o.MetricOptions.PollStateMetricInterval,
+        )
+        if err := mgr.Add(stateMetricsRecorder); err != nil {
+            return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PostgresqlClusterList")
+        }
+    }
+
+    r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PostgresqlCluster_GroupVersionKind), opts...)
+
+    return ctrl.NewControllerManagedBy(mgr).
+        Named(name).
+        WithOptions(o.ForControllerRuntime()).
+        WithEventFilter(xpresource.DesiredStateChanged()).
+        Watches(&v1alpha1.PostgresqlCluster{}, eventHandler).
+        Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
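Editor's note: each controller in this diff sets a 3 * time.Minute reconcile timeout, requeues at o.PollInterval, and optionally spreads polls with managed.WithPollJitterHook(o.PollJitter). The sketch below models the intended effect of jitter as an assumption about its semantics (uniform noise around the base interval), not upjet's or crossplane-runtime's exact implementation.

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// effectivePollInterval illustrates (assumption, not the library's code) what
// a poll-jitter hook is for: spreading the steady-state requeue of many
// managed resources across time so they do not all hit the Terraform layer in
// the same instant.
func effectivePollInterval(base, jitter time.Duration) time.Duration {
    if jitter <= 0 {
        return base
    }
    // uniform in [base-jitter, base+jitter]
    return base + time.Duration(rand.Int63n(int64(2*jitter))) - jitter
}

func main() {
    for i := 0; i < 3; i++ {
        fmt.Println(effectivePollInterval(10*time.Minute, time.Minute))
    }
}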
diff --git a/internal/controller/mdb/postgresqldatabase/zz_controller.go b/internal/controller/mdb/postgresqldatabase/zz_controller.go
new file mode 100755
index 0000000..ea37c0c
--- /dev/null
+++ b/internal/controller/mdb/postgresqldatabase/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package postgresqldatabase
+
+import (
+    "time"
+
+    "github.com/crossplane/crossplane-runtime/pkg/connection"
+    "github.com/crossplane/crossplane-runtime/pkg/event"
+    "github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+    "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+    xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+    "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+    tjcontroller "github.com/crossplane/upjet/pkg/controller"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
+    "github.com/pkg/errors"
+    ctrl "sigs.k8s.io/controller-runtime"
+
+    v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles PostgresqlDatabase managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+    name := managed.ControllerName(v1alpha1.PostgresqlDatabase_GroupVersionKind.String())
+    var initializers managed.InitializerChain
+    cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+    if o.SecretStoreConfigGVK != nil {
+        cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+    }
+    eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PostgresqlDatabase_GroupVersionKind)))
+    ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PostgresqlDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+    opts := []managed.ReconcilerOption{
+        managed.WithExternalConnecter(
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_postgresql_database"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PostgresqlDatabase_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+        managed.WithLogger(o.Logger.WithValues("controller", name)),
+        managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+        managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+        managed.WithTimeout(3 * time.Minute),
+        managed.WithInitializers(initializers),
+        managed.WithConnectionPublishers(cps...),
+        managed.WithPollInterval(o.PollInterval),
+    }
+    if o.PollJitter != 0 {
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+    }
+    if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+        opts = append(opts, managed.WithManagementPolicies())
+    }
+    if o.MetricOptions != nil {
+        opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+    }
+
+    // register webhooks for the kind v1alpha1.PostgresqlDatabase
+    // if they're enabled.
+    if o.StartWebhooks {
+        if err := ctrl.NewWebhookManagedBy(mgr).
+            For(&v1alpha1.PostgresqlDatabase{}).
+            Complete(); err != nil {
+            return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PostgresqlDatabase")
+        }
+    }
+
+    if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+        stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+            mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PostgresqlDatabaseList{}, o.MetricOptions.PollStateMetricInterval,
+        )
+        if err := mgr.Add(stateMetricsRecorder); err != nil {
+            return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PostgresqlDatabaseList")
+        }
+    }
+
+    r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PostgresqlDatabase_GroupVersionKind), opts...)
+
+    return ctrl.NewControllerManagedBy(mgr).
+        Named(name).
+        WithOptions(o.ForControllerRuntime()).
+        WithEventFilter(xpresource.DesiredStateChanged()).
+        Watches(&v1alpha1.PostgresqlDatabase{}, eventHandler).
+        Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/mdb/postgresqluser/zz_controller.go b/internal/controller/mdb/postgresqluser/zz_controller.go
new file mode 100755
index 0000000..297f890
--- /dev/null
+++ b/internal/controller/mdb/postgresqluser/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package postgresqluser
+
+import (
+    "time"
+
+    "github.com/crossplane/crossplane-runtime/pkg/connection"
+    "github.com/crossplane/crossplane-runtime/pkg/event"
+    "github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+    "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+    xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+    "github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+    tjcontroller "github.com/crossplane/upjet/pkg/controller"
+    "github.com/crossplane/upjet/pkg/controller/handler"
+    "github.com/crossplane/upjet/pkg/metrics"
+    "github.com/pkg/errors"
+    ctrl "sigs.k8s.io/controller-runtime"
+
+    v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1"
+    features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles PostgresqlUser managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+    name := managed.ControllerName(v1alpha1.PostgresqlUser_GroupVersionKind.String())
+    var initializers managed.InitializerChain
+    cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+    if o.SecretStoreConfigGVK != nil {
+        cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+    }
+    eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PostgresqlUser_GroupVersionKind)))
+    ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PostgresqlUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+    opts := []managed.ReconcilerOption{
+        managed.WithExternalConnecter(
+            tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_postgresql_user"],
+                tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+                tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+                tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+                tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PostgresqlUser_GroupVersionKind, mgr, o.PollInterval)),
+                tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+        managed.WithLogger(o.Logger.WithValues("controller", name)),
+        managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+        managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+        managed.WithTimeout(3 * time.Minute),
+        managed.WithInitializers(initializers),
+        managed.WithConnectionPublishers(cps...),
+        managed.WithPollInterval(o.PollInterval),
+    }
+    if o.PollJitter != 0 {
+        opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+    }
+    if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+        opts = append(opts, managed.WithManagementPolicies())
+    }
+    if o.MetricOptions != nil {
+        opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+
} + + // register webhooks for the kind v1alpha1.PostgresqlUser + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.PostgresqlUser{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PostgresqlUser") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PostgresqlUserList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PostgresqlUserList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PostgresqlUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.PostgresqlUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/mdb/rediscluster/zz_controller.go b/internal/controller/mdb/rediscluster/zz_controller.go new file mode 100755 index 0000000..2e1be84 --- /dev/null +++ b/internal/controller/mdb/rediscluster/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package rediscluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles RedisCluster managed resources. 
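+//
+// The reconciler is bound to the Terraform schema registered under
+// o.Provider.Resources["yandex_mdb_redis_cluster"]; the async connector runs
+// the Terraform plugin SDK operations outside the reconcile loop and reports
+// completion back through the API callbacks (ac) and the shared eventHandler.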
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.RedisCluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.RedisCluster_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.RedisCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_redis_cluster"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RedisCluster_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.RedisCluster + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.RedisCluster{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.RedisCluster") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.RedisClusterList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.RedisClusterList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.RedisCluster_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.RedisCluster{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/mdb/sqlservercluster/zz_controller.go b/internal/controller/mdb/sqlservercluster/zz_controller.go new file mode 100755 index 0000000..1e4580e --- /dev/null +++ b/internal/controller/mdb/sqlservercluster/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package sqlservercluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/mdb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SqlserverCluster managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SqlserverCluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SqlserverCluster_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SqlserverCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_sqlserver_cluster"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SqlserverCluster_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, 
managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SqlserverCluster + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SqlserverCluster{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SqlserverCluster") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SqlserverClusterList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SqlserverClusterList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SqlserverCluster_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SqlserverCluster{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/message/queue/zz_controller.go b/internal/controller/message/queue/zz_controller.go new file mode 100755 index 0000000..1499b45 --- /dev/null +++ b/internal/controller/message/queue/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package queue + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/message/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Queue managed resources. 
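+//
+// Connection details are published to a Kubernetes Secret through
+// managed.NewAPISecretPublisher; if o.SecretStoreConfigGVK is set, a
+// connection.DetailsManager is appended so the details also reach the
+// configured external secret store, using TLS settings from o.ESSOptions.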
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Queue_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Queue_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Queue_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_message_queue"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Queue_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Queue + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Queue{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Queue") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.QueueList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.QueueList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Queue_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Queue{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/monitoring/dashboard/zz_controller.go b/internal/controller/monitoring/dashboard/zz_controller.go index f89152b..575c4a2 100755 --- a/internal/controller/monitoring/dashboard/zz_controller.go +++ b/internal/controller/monitoring/dashboard/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package dashboard @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/monitoring/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles Dashboard managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Dashboard_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_monitoring_dashboard"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Dashboard_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_monitoring_dashboard"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Dashboard_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if 
o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/organizationmanager/group/zz_controller.go b/internal/controller/organizationmanager/group/zz_controller.go new file mode 100755 index 0000000..94e7f7a --- /dev/null +++ b/internal/controller/organizationmanager/group/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package group + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Group managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Group_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Group_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Group_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Group_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, 
managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Group + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Group{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Group") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GroupList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GroupList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Group_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Group{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/groupiammember/zz_controller.go b/internal/controller/organizationmanager/groupiammember/zz_controller.go new file mode 100755 index 0000000..e8770a4 --- /dev/null +++ b/internal/controller/organizationmanager/groupiammember/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package groupiammember + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles GroupIAMMember managed resources. 
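+//
+// With the EnableBetaManagementPolicies feature gate on, spec.managementPolicies
+// is honored both by the managed reconciler (managed.WithManagementPolicies)
+// and by the async connector
+// (tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies).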
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.GroupIAMMember_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.GroupIAMMember_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.GroupIAMMember_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_group_iam_member"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GroupIAMMember_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.GroupIAMMember + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.GroupIAMMember{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.GroupIAMMember") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GroupIAMMemberList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GroupIAMMemberList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.GroupIAMMember_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.GroupIAMMember{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/groupmembership/zz_controller.go b/internal/controller/organizationmanager/groupmembership/zz_controller.go new file mode 100755 index 0000000..a50a5dd --- /dev/null +++ b/internal/controller/organizationmanager/groupmembership/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package groupmembership + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles GroupMembership managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.GroupMembership_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.GroupMembership_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.GroupMembership_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_group_membership"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GroupMembership_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if 
o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.GroupMembership + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.GroupMembership{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.GroupMembership") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GroupMembershipList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GroupMembershipList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.GroupMembership_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.GroupMembership{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/organizationiambinding/zz_controller.go b/internal/controller/organizationmanager/organizationiambinding/zz_controller.go new file mode 100755 index 0000000..5964977 --- /dev/null +++ b/internal/controller/organizationmanager/organizationiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package organizationiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles OrganizationIAMBinding managed resources. 
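+//
+// Metrics are recorded at two levels: per-resource Terraform operation
+// metrics via metrics.NewMetricRecorder, and, when o.MetricOptions is set,
+// managed-resource metrics plus a state recorder that polls
+// OrganizationIAMBindingList every o.MetricOptions.PollStateMetricInterval.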
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.OrganizationIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.OrganizationIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.OrganizationIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_organization_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.OrganizationIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.OrganizationIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.OrganizationIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.OrganizationIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.OrganizationIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.OrganizationIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.OrganizationIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.OrganizationIAMBinding{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/organizationiammember/zz_controller.go b/internal/controller/organizationmanager/organizationiammember/zz_controller.go new file mode 100755 index 0000000..8db13c6 --- /dev/null +++ b/internal/controller/organizationmanager/organizationiammember/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package organizationiammember + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles OrganizationIAMMember managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.OrganizationIAMMember_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.OrganizationIAMMember_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.OrganizationIAMMember_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_organization_iam_member"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.OrganizationIAMMember_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = 
append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.OrganizationIAMMember + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.OrganizationIAMMember{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.OrganizationIAMMember") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.OrganizationIAMMemberList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.OrganizationIAMMemberList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.OrganizationIAMMember_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.OrganizationIAMMember{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/osloginsettings/zz_controller.go b/internal/controller/organizationmanager/osloginsettings/zz_controller.go new file mode 100755 index 0000000..a16847c --- /dev/null +++ b/internal/controller/organizationmanager/osloginsettings/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package osloginsettings + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles OsLoginSettings managed resources. 
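+//
+// The watch is filtered by xpresource.DesiredStateChanged(), so status-only
+// updates do not requeue work, and the reconciler is wrapped in
+// ratelimiter.NewReconciler to pace reconciles by o.GlobalRateLimiter.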
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.OsLoginSettings_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.OsLoginSettings_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.OsLoginSettings_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_os_login_settings"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.OsLoginSettings_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.OsLoginSettings + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.OsLoginSettings{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.OsLoginSettings") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.OsLoginSettingsList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.OsLoginSettingsList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.OsLoginSettings_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.OsLoginSettings{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/samlfederation/zz_controller.go b/internal/controller/organizationmanager/samlfederation/zz_controller.go new file mode 100755 index 0000000..52d596c --- /dev/null +++ b/internal/controller/organizationmanager/samlfederation/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package samlfederation + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SAMLFederation managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SAMLFederation_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SAMLFederation_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SAMLFederation_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_saml_federation"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SAMLFederation_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil 
{ + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SAMLFederation + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SAMLFederation{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SAMLFederation") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SAMLFederationList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SAMLFederationList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SAMLFederation_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SAMLFederation{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/organizationmanager/samlfederationuseraccount/zz_controller.go b/internal/controller/organizationmanager/samlfederationuseraccount/zz_controller.go new file mode 100755 index 0000000..20a6de2 --- /dev/null +++ b/internal/controller/organizationmanager/samlfederationuseraccount/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package samlfederationuseraccount + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SAMLFederationUserAccount managed resources. 
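+//
+// Webhooks are registered only when o.StartWebhooks is true; the builder then
+// serves whatever hooks the kind implements (admission and/or conversion)
+// from the manager's webhook server.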
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SAMLFederationUserAccount_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SAMLFederationUserAccount_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SAMLFederationUserAccount_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_saml_federation_user_account"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SAMLFederationUserAccount_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SAMLFederationUserAccount + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SAMLFederationUserAccount{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SAMLFederationUserAccount") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SAMLFederationUserAccountList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SAMLFederationUserAccountList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SAMLFederationUserAccount_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SAMLFederationUserAccount{}, eventHandler). 
diff --git a/internal/controller/organizationmanager/usersshkey/zz_controller.go b/internal/controller/organizationmanager/usersshkey/zz_controller.go
new file mode 100755
index 0000000..8aab036
--- /dev/null
+++ b/internal/controller/organizationmanager/usersshkey/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package usersshkey
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles UserSSHKey managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.UserSSHKey_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.UserSSHKey_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.UserSSHKey_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_organizationmanager_user_ssh_key"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.UserSSHKey_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.UserSSHKey
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.UserSSHKey{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.UserSSHKey")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.UserSSHKeyList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.UserSSHKeyList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.UserSSHKey_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.UserSSHKey{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
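The ten diffs that follow (resourcemanager, serverless, smartcaptcha, and sws controllers) are a mechanical cleanup with no behavioral change: they drop two leading blank lines, re-indent the option lists, respace 3*time.Minute to 3 * time.Minute, and regroup the imports, replacing the no-longer-referenced github.com/crossplane/upjet/pkg/terraform import with the github.com/crossplane/upjet/pkg/metrics package each hunk actually uses. For reference, this is the import block those files converge on, copied from the new files in this same change; only the v1alpha1 path varies by API group:

// Post-cleanup import block shared by the generated controllers
// (shown here for a resourcemanager kind).
import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/metrics"
	"github.com/pkg/errors"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
	features "github.com/tagesjump/provider-upjet-yc/internal/features"
)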
diff --git a/internal/controller/resourcemanager/cloud/zz_controller.go b/internal/controller/resourcemanager/cloud/zz_controller.go
index 1eef173..0ac0fa3 100755
--- a/internal/controller/resourcemanager/cloud/zz_controller.go
+++ b/internal/controller/resourcemanager/cloud/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package cloud
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles Cloud managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cloud_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_cloud"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Cloud_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_cloud"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Cloud_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/resourcemanager/cloudiambinding/zz_controller.go b/internal/controller/resourcemanager/cloudiambinding/zz_controller.go
index 437e4b7..2d0f8ac 100755
--- a/internal/controller/resourcemanager/cloudiambinding/zz_controller.go
+++ b/internal/controller/resourcemanager/cloudiambinding/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package cloudiambinding
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles CloudIAMBinding managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CloudIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_cloud_iam_binding"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CloudIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_cloud_iam_binding"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CloudIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/resourcemanager/cloudiammember/zz_controller.go b/internal/controller/resourcemanager/cloudiammember/zz_controller.go
index 8d0c2bc..60860a6 100755
--- a/internal/controller/resourcemanager/cloudiammember/zz_controller.go
+++ b/internal/controller/resourcemanager/cloudiammember/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package cloudiammember
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles CloudIAMMember managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CloudIAMMember_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_cloud_iam_member"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CloudIAMMember_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_cloud_iam_member"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CloudIAMMember_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/resourcemanager/folder/zz_controller.go b/internal/controller/resourcemanager/folder/zz_controller.go
index 86b5687..a8df4f1 100755
--- a/internal/controller/resourcemanager/folder/zz_controller.go
+++ b/internal/controller/resourcemanager/folder/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package folder
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
 	features "github.com/tagesjump/provider-upjet-yc/internal/features"
-v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-
 )
 
 // Setup adds a controller that reconciles Folder managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Folder_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Folder_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Folder_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/resourcemanager/folderiambinding/zz_controller.go b/internal/controller/resourcemanager/folderiambinding/zz_controller.go
index 397ae2e..9a09350 100755
--- a/internal/controller/resourcemanager/folderiambinding/zz_controller.go
+++ b/internal/controller/resourcemanager/folderiambinding/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package folderiambinding
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
 	features "github.com/tagesjump/provider-upjet-yc/internal/features"
-v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-
 )
 
 // Setup adds a controller that reconciles FolderIAMBinding managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.FolderIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder_iam_binding"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FolderIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder_iam_binding"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FolderIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/resourcemanager/folderiammember/zz_controller.go b/internal/controller/resourcemanager/folderiammember/zz_controller.go
index e6eb35c..17529e4 100755
--- a/internal/controller/resourcemanager/folderiammember/zz_controller.go
+++ b/internal/controller/resourcemanager/folderiammember/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package folderiammember
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles FolderIAMMember managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.FolderIAMMember_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder_iam_member"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FolderIAMMember_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder_iam_member"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FolderIAMMember_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/resourcemanager/folderiampolicy/zz_controller.go b/internal/controller/resourcemanager/folderiampolicy/zz_controller.go
index 19bda62..ae7eba7 100755
--- a/internal/controller/resourcemanager/folderiampolicy/zz_controller.go
+++ b/internal/controller/resourcemanager/folderiampolicy/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package folderiampolicy
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles FolderIAMPolicy managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.FolderIAMPolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder_iam_policy"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FolderIAMPolicy_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_resourcemanager_folder_iam_policy"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FolderIAMPolicy_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/serverless/container/zz_controller.go b/internal/controller/serverless/container/zz_controller.go
index b413c37..775323e 100755
--- a/internal/controller/serverless/container/zz_controller.go
+++ b/internal/controller/serverless/container/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package container
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1"
 	features "github.com/tagesjump/provider-upjet-yc/internal/features"
-v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1"
-
 )
 
 // Setup adds a controller that reconciles Container managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Container_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_serverless_container"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Container_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_serverless_container"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Container_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/serverless/containeriambinding/zz_controller.go b/internal/controller/serverless/containeriambinding/zz_controller.go
index b269146..f2f6411 100755
--- a/internal/controller/serverless/containeriambinding/zz_controller.go
+++ b/internal/controller/serverless/containeriambinding/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package containeriambinding
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles ContainerIAMBinding managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ContainerIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_serverless_container_iam_binding"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ContainerIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_serverless_container_iam_binding"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ContainerIAMBinding_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/smartcaptcha/captcha/zz_controller.go b/internal/controller/smartcaptcha/captcha/zz_controller.go
index 3667298..c3cd648 100755
--- a/internal/controller/smartcaptcha/captcha/zz_controller.go
+++ b/internal/controller/smartcaptcha/captcha/zz_controller.go
@@ -1,5 +1,3 @@
-
-
 // Code generated by upjet. DO NOT EDIT.
 
 package captcha
@@ -13,15 +11,14 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
 	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
 	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
-	"github.com/crossplane/upjet/pkg/controller/handler"
 	tjcontroller "github.com/crossplane/upjet/pkg/controller"
-	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/smartcaptcha/v1alpha1"
-features "github.com/tagesjump/provider-upjet-yc/internal/features"
-
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
 )
 
 // Setup adds a controller that reconciles Captcha managed resources.
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
 	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Captcha_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
 	opts := []managed.ReconcilerOption{
 		managed.WithExternalConnecter(
-		tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_smartcaptcha_captcha"],
-		tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
-		tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
-		tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
-		tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Captcha_GroupVersionKind, mgr, o.PollInterval)),
-		tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_smartcaptcha_captcha"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Captcha_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
 		managed.WithLogger(o.Logger.WithValues("controller", name)),
 		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
 		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
-		managed.WithTimeout(3*time.Minute),
+		managed.WithTimeout(3 * time.Minute),
 		managed.WithInitializers(initializers),
 		managed.WithConnectionPublishers(cps...),
 		managed.WithPollInterval(o.PollInterval),
 	}
 	if o.PollJitter != 0 {
-	opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
 	}
 	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
 		opts = append(opts, managed.WithManagementPolicies())
diff --git a/internal/controller/storage/bucket/zz_controller.go b/internal/controller/storage/bucket/zz_controller.go
new file mode 100755
index 0000000..6e25abb
--- /dev/null
+++ b/internal/controller/storage/bucket/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package bucket
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Bucket managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Bucket_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Bucket_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Bucket_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_storage_bucket"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Bucket_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.Bucket
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Bucket{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Bucket")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.BucketList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.BucketList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Bucket_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.Bucket{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/storage/object/zz_controller.go b/internal/controller/storage/object/zz_controller.go
new file mode 100755
index 0000000..742460c
--- /dev/null
+++ b/internal/controller/storage/object/zz_controller.go
@@ -0,0 +1,87 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package object
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/metrics"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1"
+	features "github.com/tagesjump/provider-upjet-yc/internal/features"
+)
+
+// Setup adds a controller that reconciles Object managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Object_GroupVersionKind.String())
+	var initializers managed.InitializerChain
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.SecretStoreConfigGVK != nil {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Object_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Object_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false))
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(
+			tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_storage_object"],
+				tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger),
+				tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler),
+				tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac),
+				tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Object_GroupVersionKind, mgr, o.PollInterval)),
+				tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 {
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+	if o.MetricOptions != nil {
+		opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics))
+	}
+
+	// register webhooks for the kind v1alpha1.Object
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Object{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Object")
+		}
+	}
+
+	if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil {
+		stateMetricsRecorder := statemetrics.NewMRStateRecorder(
+			mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ObjectList{}, o.MetricOptions.PollStateMetricInterval,
+		)
+		if err := mgr.Add(stateMetricsRecorder); err != nil {
+			return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ObjectList")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Object_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()).
+		Watches(&v1alpha1.Object{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
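Every Setup in this change finishes by wrapping its reconciler in ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter), so all controllers drain one shared token bucket rather than rate-limiting independently. A minimal sketch of how that shared limiter is usually built in a provider binary, assuming crossplane-runtime's standard helper; the rate value and variable names are illustrative, not taken from this repository:

// Sketch only: build the provider-wide limiter that Setup receives via
// tjcontroller.Options (which embeds crossplane-runtime's controller
// Options and its GlobalRateLimiter field).
package main

import "github.com/crossplane/crossplane-runtime/pkg/ratelimiter"

func main() {
	// One bucket shared by every controller registered above; 10
	// reconciles per second overall is a common default, chosen here
	// for illustration.
	limiter := ratelimiter.NewGlobal(10)
	_ = limiter // the real main() stores this into Options.GlobalRateLimiter
}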
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/sws/securityprofile/zz_controller.go b/internal/controller/sws/securityprofile/zz_controller.go index f4d4d26..2968bab 100755 --- a/internal/controller/sws/securityprofile/zz_controller.go +++ b/internal/controller/sws/securityprofile/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package securityprofile @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/sws/v1alpha1" -features "github.com/tagesjump/provider-upjet-yc/internal/features" - + features "github.com/tagesjump/provider-upjet-yc/internal/features" ) // Setup adds a controller that reconciles SecurityProfile managed resources. @@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SecurityProfile_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_sws_security_profile"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecurityProfile_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_sws_security_profile"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecurityProfile_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if 
o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/vpc/address/zz_controller.go b/internal/controller/vpc/address/zz_controller.go new file mode 100755 index 0000000..47f291c --- /dev/null +++ b/internal/controller/vpc/address/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package address + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Address managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Address_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Address_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Address_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_address"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Address_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks 
for the kind v1alpha1.Address + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Address{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Address") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.AddressList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.AddressList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Address_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Address{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/defaultsecuritygroup/zz_controller.go b/internal/controller/vpc/defaultsecuritygroup/zz_controller.go new file mode 100755 index 0000000..77e3a20 --- /dev/null +++ b/internal/controller/vpc/defaultsecuritygroup/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package defaultsecuritygroup + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DefaultSecurityGroup managed resources. 
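+//
+// Every controller added in this change is generated from the same template,
+// differing only in kind, Terraform resource name, and list type. A provider
+// binary typically registers all of them in one pass; the aggregate function
+// below is a minimal sketch (its name and location are assumed, not shown in
+// this diff):
+//
+//	func SetupControllers(mgr ctrl.Manager, o tjcontroller.Options) error {
+//		for _, setup := range []func(ctrl.Manager, tjcontroller.Options) error{
+//			address.Setup,
+//			defaultsecuritygroup.Setup,
+//			// ...one entry per generated controller...
+//		} {
+//			if err := setup(mgr, o); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	}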
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DefaultSecurityGroup_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DefaultSecurityGroup_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DefaultSecurityGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_default_security_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DefaultSecurityGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DefaultSecurityGroup + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DefaultSecurityGroup{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DefaultSecurityGroup") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DefaultSecurityGroupList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DefaultSecurityGroupList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DefaultSecurityGroup_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DefaultSecurityGroup{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/gateway/zz_controller.go b/internal/controller/vpc/gateway/zz_controller.go new file mode 100755 index 0000000..ce9bad6 --- /dev/null +++ b/internal/controller/vpc/gateway/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package gateway + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Gateway managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Gateway_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Gateway_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Gateway_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_gateway"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Gateway_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Gateway + // if 
they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Gateway{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Gateway") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GatewayList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GatewayList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Gateway_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Gateway{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/network/zz_controller.go b/internal/controller/vpc/network/zz_controller.go new file mode 100755 index 0000000..81cf9aa --- /dev/null +++ b/internal/controller/vpc/network/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package network + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Network managed resources. 
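+//
+// The external connecter below is bound to its Terraform schema through
+// o.Provider.Resources["yandex_vpc_network"]; the generated code indexes the
+// map directly and relies on the generator keeping the key and the controller
+// in sync. An illustrative defensive variant (not what upjet emits):
+//
+//	res, ok := o.Provider.Resources["yandex_vpc_network"]
+//	if !ok {
+//		return errors.Errorf("no Terraform schema registered for %q", "yandex_vpc_network")
+//	}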
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Network_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Network_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Network_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_network"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Network_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Network + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Network{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Network") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.NetworkList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.NetworkList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Network_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Network{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/privateendpoint/zz_controller.go b/internal/controller/vpc/privateendpoint/zz_controller.go new file mode 100755 index 0000000..02e299a --- /dev/null +++ b/internal/controller/vpc/privateendpoint/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package privateendpoint + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles PrivateEndpoint managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.PrivateEndpoint_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PrivateEndpoint_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PrivateEndpoint_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_private_endpoint"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PrivateEndpoint_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, 
managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.PrivateEndpoint + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.PrivateEndpoint{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PrivateEndpoint") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PrivateEndpointList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PrivateEndpointList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PrivateEndpoint_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.PrivateEndpoint{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/routetable/zz_controller.go b/internal/controller/vpc/routetable/zz_controller.go new file mode 100755 index 0000000..b2dff3f --- /dev/null +++ b/internal/controller/vpc/routetable/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package routetable + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles RouteTable managed resources. 
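+//
+// Requeue cadence: managed.WithPollInterval(o.PollInterval) sets the
+// steady-state sync period, and the optional poll-jitter hook spreads the
+// requeues so resources sharing an interval do not all reconcile in lockstep.
+// Roughly, with the (assumed) values
+//
+//	o.PollInterval = 10 * time.Minute
+//	o.PollJitter = time.Minute
+//
+// requeues land in about the [9m, 11m] window instead of all at 10m.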
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.RouteTable_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.RouteTable_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.RouteTable_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_route_table"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.RouteTable_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.RouteTable + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.RouteTable{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.RouteTable") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.RouteTableList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.RouteTableList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.RouteTable_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.RouteTable{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/securitygroup/zz_controller.go b/internal/controller/vpc/securitygroup/zz_controller.go new file mode 100755 index 0000000..384afd8 --- /dev/null +++ b/internal/controller/vpc/securitygroup/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package securitygroup + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SecurityGroup managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SecurityGroup_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SecurityGroup_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SecurityGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_security_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecurityGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // 
register webhooks for the kind v1alpha1.SecurityGroup + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SecurityGroup{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SecurityGroup") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SecurityGroupList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SecurityGroupList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SecurityGroup_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SecurityGroup{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/securitygrouprule/zz_controller.go b/internal/controller/vpc/securitygrouprule/zz_controller.go new file mode 100755 index 0000000..92b4e4b --- /dev/null +++ b/internal/controller/vpc/securitygrouprule/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package securitygrouprule + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SecurityGroupRule managed resources. 
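+//
+// The EnableBetaManagementPolicies flag is consulted twice in Setup: it is
+// passed into the Terraform connector, and when set it also appends
+// managed.WithManagementPolicies() so spec.managementPolicies on the managed
+// resource is honored. A sketch of enabling it while building the options
+// (the flag wiring itself is not part of this diff):
+//
+//	o.Features.Enable(features.EnableBetaManagementPolicies)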
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SecurityGroupRule_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SecurityGroupRule_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SecurityGroupRule_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_security_group_rule"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SecurityGroupRule_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SecurityGroupRule + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SecurityGroupRule{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SecurityGroupRule") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SecurityGroupRuleList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SecurityGroupRuleList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SecurityGroupRule_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SecurityGroupRule{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/vpc/subnet/zz_controller.go b/internal/controller/vpc/subnet/zz_controller.go new file mode 100755 index 0000000..8020051 --- /dev/null +++ b/internal/controller/vpc/subnet/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package subnet + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Subnet managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Subnet_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Subnet_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Subnet_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_vpc_subnet"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Subnet_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Subnet + // if they're enabled. 
+ if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Subnet{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Subnet") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SubnetList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SubnetList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Subnet_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Subnet{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/yandex/function/zz_controller.go b/internal/controller/yandex/function/zz_controller.go index 3ca648b..74fb2cb 100755 --- a/internal/controller/yandex/function/zz_controller.go +++ b/internal/controller/yandex/function/zz_controller.go @@ -1,5 +1,3 @@ - - // Code generated by upjet. DO NOT EDIT. package function @@ -13,15 +11,14 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" - "github.com/crossplane/upjet/pkg/controller/handler" tjcontroller "github.com/crossplane/upjet/pkg/controller" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1" features "github.com/tagesjump/provider-upjet-yc/internal/features" -v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1" - ) // Setup adds a controller that reconciles Function managed resources. 
@@ -36,22 +33,22 @@ func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Function_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ managed.WithExternalConnecter( - tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function"], - tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), - tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), - tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), - tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Function_GroupVersionKind, mgr, o.PollInterval)), - tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Function_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), - managed.WithTimeout(3*time.Minute), + managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), managed.WithPollInterval(o.PollInterval), } if o.PollJitter != 0 { - opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) } if o.Features.Enabled(features.EnableBetaManagementPolicies) { opts = append(opts, managed.WithManagementPolicies()) diff --git a/internal/controller/ydb/databasededicated/zz_controller.go b/internal/controller/ydb/databasededicated/zz_controller.go new file mode 100755 index 0000000..2b61ab0 --- /dev/null +++ b/internal/controller/ydb/databasededicated/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package databasededicated + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DatabaseDedicated managed resources. 
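+//
+// managed.WithTimeout(3 * time.Minute) bounds each Observe/Create/Update/
+// Delete call made by the reconciler; since the connecter is the async
+// Terraform plugin SDK variant, an apply that outlives that bound keeps
+// running under o.OperationTrackerStore and reports back through the API
+// callbacks (ac). A rough timeline of a slow create (illustrative only):
+//
+//	t=0      Create() starts terraform apply asynchronously
+//	t=3m     the reconciler call returns; the apply keeps running
+//	t=done   the callback records the result and triggers a requeue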
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DatabaseDedicated_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DatabaseDedicated_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DatabaseDedicated_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_database_dedicated"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DatabaseDedicated_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DatabaseDedicated + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DatabaseDedicated{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DatabaseDedicated") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DatabaseDedicatedList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DatabaseDedicatedList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DatabaseDedicated_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DatabaseDedicated{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/ydb/databaseiambinding/zz_controller.go b/internal/controller/ydb/databaseiambinding/zz_controller.go new file mode 100755 index 0000000..a6a14d8 --- /dev/null +++ b/internal/controller/ydb/databaseiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package databaseiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DatabaseIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DatabaseIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DatabaseIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DatabaseIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_database_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DatabaseIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, 
managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DatabaseIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DatabaseIAMBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DatabaseIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DatabaseIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DatabaseIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DatabaseIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DatabaseIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/ydb/databaseserverless/zz_controller.go b/internal/controller/ydb/databaseserverless/zz_controller.go new file mode 100755 index 0000000..285f1dd --- /dev/null +++ b/internal/controller/ydb/databaseserverless/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package databaseserverless + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DatabaseServerless managed resources. 
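+//
+// When MR state metrics are configured, the recorder added in Setup
+// periodically lists DatabaseServerlessList and exports per-state gauges,
+// and mgr.Add ties its lifecycle to the manager. The list frequency comes
+// from the poll interval in the metric options, e.g. (value assumed):
+//
+//	o.MetricOptions.PollStateMetricInterval = 30 * time.Second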
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DatabaseServerless_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DatabaseServerless_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DatabaseServerless_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_database_serverless"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DatabaseServerless_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DatabaseServerless + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DatabaseServerless{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DatabaseServerless") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DatabaseServerlessList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DatabaseServerlessList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DatabaseServerless_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DatabaseServerless{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/ydb/table/zz_controller.go b/internal/controller/ydb/table/zz_controller.go new file mode 100755 index 0000000..63514d4 --- /dev/null +++ b/internal/controller/ydb/table/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package table + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Table managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Table_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Table_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Table_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_table"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Table_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Table + // if they're enabled. 
+ if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Table{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Table") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.TableList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.TableList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Table_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Table{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/ydb/tablechangefeed/zz_controller.go b/internal/controller/ydb/tablechangefeed/zz_controller.go new file mode 100755 index 0000000..7b2b8f1 --- /dev/null +++ b/internal/controller/ydb/tablechangefeed/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package tablechangefeed + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles TableChangefeed managed resources. 
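+// The wiring below mirrors every generated controller in this change: an async Terraform plugin SDK connector, optional beta management policies, optional webhooks, and optional MR and MR-state metrics recorders.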
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.TableChangefeed_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TableChangefeed_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TableChangefeed_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_table_changefeed"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TableChangefeed_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.TableChangefeed + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.TableChangefeed{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.TableChangefeed") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.TableChangefeedList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.TableChangefeedList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TableChangefeed_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.TableChangefeed{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/ydb/tableindex/zz_controller.go b/internal/controller/ydb/tableindex/zz_controller.go new file mode 100755 index 0000000..0b89360 --- /dev/null +++ b/internal/controller/ydb/tableindex/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package tableindex + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles TableIndex managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.TableIndex_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TableIndex_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TableIndex_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_table_index"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.TableIndex_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind 
v1alpha1.TableIndex + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.TableIndex{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.TableIndex") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.TableIndexList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.TableIndexList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TableIndex_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.TableIndex{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/ydb/topic/zz_controller.go b/internal/controller/ydb/topic/zz_controller.go new file mode 100755 index 0000000..202c3e4 --- /dev/null +++ b/internal/controller/ydb/topic/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package topic + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Topic managed resources. 
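+// The connector below resolves the Terraform resource schema via o.Provider.Resources["yandex_ydb_topic"].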
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Topic_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Topic_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Topic_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_ydb_topic"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Topic_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Topic + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Topic{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Topic") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.TopicList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.TopicList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Topic_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Topic{}, eventHandler). 
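+	// xpresource.DesiredStateChanged() above keeps status-only updates from triggering reconciles; the shared eventHandler lets the async Terraform callbacks requeue a Topic once a background apply or destroy completes.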
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go new file mode 100755 index 0000000..b16d967 --- /dev/null +++ b/internal/controller/zz_setup.go @@ -0,0 +1,286 @@ +package controller + +import ( + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/crossplane/upjet/pkg/controller" + + backendgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/backendgroup" + httprouter "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/httprouter" + loadbalancer "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/loadbalancer" + targetgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/targetgroup" + virtualhost "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/virtualhost" + gateway "github.com/tagesjump/provider-upjet-yc/internal/controller/api/gateway" + trailstrail "github.com/tagesjump/provider-upjet-yc/internal/controller/audit/trailstrail" + policy "github.com/tagesjump/provider-upjet-yc/internal/controller/backup/policy" + policybindings "github.com/tagesjump/provider-upjet-yc/internal/controller/backup/policybindings" + cloudbinding "github.com/tagesjump/provider-upjet-yc/internal/controller/billing/cloudbinding" + origingroup "github.com/tagesjump/provider-upjet-yc/internal/controller/cdn/origingroup" + resource "github.com/tagesjump/provider-upjet-yc/internal/controller/cdn/resource" + certificate "github.com/tagesjump/provider-upjet-yc/internal/controller/cm/certificate" + disk "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/disk" + diskiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/diskiambinding" + diskplacementgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/diskplacementgroup" + diskplacementgroupiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/diskplacementgroupiambinding" + filesystem "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/filesystem" + filesystemiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/filesystemiambinding" + gpucluster "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/gpucluster" + gpuclusteriambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/gpuclusteriambinding" + image "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/image" + imageiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/imageiambinding" + instance "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/instance" + instancegroup "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/instancegroup" + instanceiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/instanceiambinding" + placementgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/placementgroup" + placementgroupiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/placementgroupiambinding" + snapshot "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshot" + snapshotiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotiambinding" + snapshotschedule "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotschedule" + snapshotscheduleiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotscheduleiambinding" + registry 
"github.com/tagesjump/provider-upjet-yc/internal/controller/container/registry" + registryiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/container/registryiambinding" + registryippermission "github.com/tagesjump/provider-upjet-yc/internal/controller/container/registryippermission" + repository "github.com/tagesjump/provider-upjet-yc/internal/controller/container/repository" + repositoryiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/container/repositoryiambinding" + repositorylifecyclepolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/container/repositorylifecyclepolicy" + cluster "github.com/tagesjump/provider-upjet-yc/internal/controller/dataproc/cluster" + endpoint "github.com/tagesjump/provider-upjet-yc/internal/controller/datatransfer/endpoint" + transfer "github.com/tagesjump/provider-upjet-yc/internal/controller/datatransfer/transfer" + recordset "github.com/tagesjump/provider-upjet-yc/internal/controller/dns/recordset" + zone "github.com/tagesjump/provider-upjet-yc/internal/controller/dns/zone" + zoneiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/dns/zoneiambinding" + iambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/function/iambinding" + scalingpolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/function/scalingpolicy" + trigger "github.com/tagesjump/provider-upjet-yc/internal/controller/function/trigger" + serviceaccount "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccount" + serviceaccountapikey "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountapikey" + serviceaccountiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountiambinding" + serviceaccountiammember "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountiammember" + serviceaccountiampolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountiampolicy" + serviceaccountkey "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountkey" + serviceaccountstaticaccesskey "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountstaticaccesskey" + corebroker "github.com/tagesjump/provider-upjet-yc/internal/controller/iot/corebroker" + coredevice "github.com/tagesjump/provider-upjet-yc/internal/controller/iot/coredevice" + coreregistry "github.com/tagesjump/provider-upjet-yc/internal/controller/iot/coreregistry" + asymmetricencryptionkey "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricencryptionkey" + asymmetricencryptionkeyiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricencryptionkeyiambinding" + asymmetricsignaturekey "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricsignaturekey" + asymmetricsignaturekeyiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricsignaturekeyiambinding" + secretciphertext "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/secretciphertext" + symmetrickey "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/symmetrickey" + symmetrickeyiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/symmetrickeyiambinding" + clusterkubernetes "github.com/tagesjump/provider-upjet-yc/internal/controller/kubernetes/cluster" + nodegroup "github.com/tagesjump/provider-upjet-yc/internal/controller/kubernetes/nodegroup" + networkloadbalancer 
"github.com/tagesjump/provider-upjet-yc/internal/controller/lb/networkloadbalancer" + targetgrouplb "github.com/tagesjump/provider-upjet-yc/internal/controller/lb/targetgroup" + agent "github.com/tagesjump/provider-upjet-yc/internal/controller/loadtesting/agent" + secret "github.com/tagesjump/provider-upjet-yc/internal/controller/lockbox/secret" + secretiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/lockbox/secretiambinding" + secretversion "github.com/tagesjump/provider-upjet-yc/internal/controller/lockbox/secretversion" + group "github.com/tagesjump/provider-upjet-yc/internal/controller/logging/group" + clickhousecluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/clickhousecluster" + elasticsearchcluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/elasticsearchcluster" + greenplumcluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/greenplumcluster" + kafkacluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/kafkacluster" + kafkaconnector "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/kafkaconnector" + kafkatopic "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/kafkatopic" + kafkauser "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/kafkauser" + mongodbcluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/mongodbcluster" + mongodbdatabase "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/mongodbdatabase" + mongodbuser "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/mongodbuser" + mysqlcluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/mysqlcluster" + mysqldatabase "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/mysqldatabase" + mysqluser "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/mysqluser" + postgresqlcluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/postgresqlcluster" + postgresqldatabase "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/postgresqldatabase" + postgresqluser "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/postgresqluser" + rediscluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/rediscluster" + sqlservercluster "github.com/tagesjump/provider-upjet-yc/internal/controller/mdb/sqlservercluster" + queue "github.com/tagesjump/provider-upjet-yc/internal/controller/message/queue" + dashboard "github.com/tagesjump/provider-upjet-yc/internal/controller/monitoring/dashboard" + grouporganizationmanager "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/group" + groupiammember "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/groupiammember" + groupmembership "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/groupmembership" + organizationiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/organizationiambinding" + organizationiammember "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/organizationiammember" + osloginsettings "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/osloginsettings" + samlfederation "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/samlfederation" + samlfederationuseraccount "github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/samlfederationuseraccount" + usersshkey 
"github.com/tagesjump/provider-upjet-yc/internal/controller/organizationmanager/usersshkey" + providerconfig "github.com/tagesjump/provider-upjet-yc/internal/controller/providerconfig" + cloud "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/cloud" + cloudiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/cloudiambinding" + cloudiammember "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/cloudiammember" + folder "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folder" + folderiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folderiambinding" + folderiammember "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folderiammember" + folderiampolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folderiampolicy" + container "github.com/tagesjump/provider-upjet-yc/internal/controller/serverless/container" + containeriambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/serverless/containeriambinding" + captcha "github.com/tagesjump/provider-upjet-yc/internal/controller/smartcaptcha/captcha" + bucket "github.com/tagesjump/provider-upjet-yc/internal/controller/storage/bucket" + object "github.com/tagesjump/provider-upjet-yc/internal/controller/storage/object" + securityprofile "github.com/tagesjump/provider-upjet-yc/internal/controller/sws/securityprofile" + address "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/address" + defaultsecuritygroup "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/defaultsecuritygroup" + gatewayvpc "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/gateway" + network "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/network" + privateendpoint "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/privateendpoint" + routetable "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/routetable" + securitygroup "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/securitygroup" + securitygrouprule "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/securitygrouprule" + subnet "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/subnet" + function "github.com/tagesjump/provider-upjet-yc/internal/controller/yandex/function" + databasededicated "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/databasededicated" + databaseiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/databaseiambinding" + databaseserverless "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/databaseserverless" + table "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/table" + tablechangefeed "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/tablechangefeed" + tableindex "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/tableindex" + topic "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/topic" +) + +// Setup creates all controllers with the supplied logger and adds them to +// the supplied manager. 
+func Setup(mgr ctrl.Manager, o controller.Options) error { + for _, setup := range []func(ctrl.Manager, controller.Options) error{ + backendgroup.Setup, + httprouter.Setup, + loadbalancer.Setup, + targetgroup.Setup, + virtualhost.Setup, + gateway.Setup, + trailstrail.Setup, + policy.Setup, + policybindings.Setup, + cloudbinding.Setup, + origingroup.Setup, + resource.Setup, + certificate.Setup, + disk.Setup, + diskiambinding.Setup, + diskplacementgroup.Setup, + diskplacementgroupiambinding.Setup, + filesystem.Setup, + filesystemiambinding.Setup, + gpucluster.Setup, + gpuclusteriambinding.Setup, + image.Setup, + imageiambinding.Setup, + instance.Setup, + instancegroup.Setup, + instanceiambinding.Setup, + placementgroup.Setup, + placementgroupiambinding.Setup, + snapshot.Setup, + snapshotiambinding.Setup, + snapshotschedule.Setup, + snapshotscheduleiambinding.Setup, + registry.Setup, + registryiambinding.Setup, + registryippermission.Setup, + repository.Setup, + repositoryiambinding.Setup, + repositorylifecyclepolicy.Setup, + cluster.Setup, + endpoint.Setup, + transfer.Setup, + recordset.Setup, + zone.Setup, + zoneiambinding.Setup, + iambinding.Setup, + scalingpolicy.Setup, + trigger.Setup, + serviceaccount.Setup, + serviceaccountapikey.Setup, + serviceaccountiambinding.Setup, + serviceaccountiammember.Setup, + serviceaccountiampolicy.Setup, + serviceaccountkey.Setup, + serviceaccountstaticaccesskey.Setup, + corebroker.Setup, + coredevice.Setup, + coreregistry.Setup, + asymmetricencryptionkey.Setup, + asymmetricencryptionkeyiambinding.Setup, + asymmetricsignaturekey.Setup, + asymmetricsignaturekeyiambinding.Setup, + secretciphertext.Setup, + symmetrickey.Setup, + symmetrickeyiambinding.Setup, + clusterkubernetes.Setup, + nodegroup.Setup, + networkloadbalancer.Setup, + targetgrouplb.Setup, + agent.Setup, + secret.Setup, + secretiambinding.Setup, + secretversion.Setup, + group.Setup, + clickhousecluster.Setup, + elasticsearchcluster.Setup, + greenplumcluster.Setup, + kafkacluster.Setup, + kafkaconnector.Setup, + kafkatopic.Setup, + kafkauser.Setup, + mongodbcluster.Setup, + mongodbdatabase.Setup, + mongodbuser.Setup, + mysqlcluster.Setup, + mysqldatabase.Setup, + mysqluser.Setup, + postgresqlcluster.Setup, + postgresqldatabase.Setup, + postgresqluser.Setup, + rediscluster.Setup, + sqlservercluster.Setup, + queue.Setup, + dashboard.Setup, + grouporganizationmanager.Setup, + groupiammember.Setup, + groupmembership.Setup, + organizationiambinding.Setup, + organizationiammember.Setup, + osloginsettings.Setup, + samlfederation.Setup, + samlfederationuseraccount.Setup, + usersshkey.Setup, + providerconfig.Setup, + cloud.Setup, + cloudiambinding.Setup, + cloudiammember.Setup, + folder.Setup, + folderiambinding.Setup, + folderiammember.Setup, + folderiampolicy.Setup, + container.Setup, + containeriambinding.Setup, + captcha.Setup, + bucket.Setup, + object.Setup, + securityprofile.Setup, + address.Setup, + defaultsecuritygroup.Setup, + gatewayvpc.Setup, + network.Setup, + privateendpoint.Setup, + routetable.Setup, + securitygroup.Setup, + securitygrouprule.Setup, + subnet.Setup, + function.Setup, + databasededicated.Setup, + databaseiambinding.Setup, + databaseserverless.Setup, + table.Setup, + tablechangefeed.Setup, + tableindex.Setup, + topic.Setup, + } { + if err := setup(mgr, o); err != nil { + return err + } + } + return nil +} diff --git a/package/crds/alb.yandex-cloud.upjet.crossplane.io_backendgroups.yaml b/package/crds/alb.yandex-cloud.upjet.crossplane.io_backendgroups.yaml new file mode 
100644 index 0000000..2e3ead5 --- /dev/null +++ b/package/crds/alb.yandex-cloud.upjet.crossplane.io_backendgroups.yaml @@ -0,0 +1,2634 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: backendgroups.alb.yandex-cloud.upjet.crossplane.io +spec: + group: alb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: BackendGroup + listKind: BackendGroupList + plural: backendgroups + singular: backendgroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: BackendGroup is the Schema for the BackendGroups API. An application + load balancer distributes the load across cloud resources that are combined + into a backend group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackendGroupSpec defines the desired state of BackendGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the backend group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + grpcBackend: + description: Grpc backend specification that will be used by the + ALB Backend Group. Structure is documented below. + items: + properties: + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. 
+ items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + targetGroupIdsRefs: + description: References to TargetGroup to populate targetGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + targetGroupIdsSelector: + description: Selector for a list of TargetGroup to populate + targetGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + httpBackend: + description: Http backend specification that will be used by the + ALB Backend Group. Structure is documented below. + items: + properties: + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. 
+ type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + http2: + description: Enables HTTP2 for upstream requests. If not + set, HTTP 1.1 will be used by default. + type: boolean + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + storageBucket: + type: string + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + targetGroupIdsRefs: + description: References to TargetGroup to populate targetGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + targetGroupIdsSelector: + description: Selector for a list of TargetGroup to populate + targetGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this backend group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the Backend Group. + type: string + sessionAffinity: + description: Session affinity mode determines how incoming requests + are grouped into one session. Structure is documented below. + items: + properties: + connection: + description: |- + Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. + IP address affinity + items: + properties: + sourceIp: + description: |- + Source IP address to use with affinity. + Use source IP address + type: boolean + type: object + type: array + cookie: + description: |- + Requests with the same cookie value and the specified file name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + Cookie affinity + items: + properties: + name: + description: |- + Name of the backend. + Name of the HTTP cookie + type: string + ttl: + description: |- + TTL for the cookie (if not set, session cookie will be used) + TTL for the cookie (if not set, session cookie will be used) + type: string + type: object + type: array + header: + description: |- + Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + Request header affinity + items: + properties: + headerName: + description: |- + The name of the request header that will be used with affinity. + The name of the request header that will be used + type: string + type: object + type: array + type: object + type: array + streamBackend: + description: Stream backend specification that will be used by + the ALB Backend Group. Structure is documented below. + items: + properties: + enableProxyProtocol: + type: boolean + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. 
+ items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + targetGroupIds: + description: References target groups for the backend. 
+ items: + type: string + type: array + targetGroupIdsRefs: + description: References to TargetGroup to populate targetGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + targetGroupIdsSelector: + description: Selector for a list of TargetGroup to populate + targetGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the backend group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + grpcBackend: + description: Grpc backend specification that will be used by the + ALB Backend Group. Structure is documented below. + items: + properties: + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. 
Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + targetGroupIdsRefs: + description: References to TargetGroup to populate targetGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + targetGroupIdsSelector: + description: Selector for a list of TargetGroup to populate + targetGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + httpBackend: + description: Http backend specification that will be used by the + ALB Backend Group. Structure is documented below. + items: + properties: + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. 
+ items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + http2: + description: Enables HTTP2 for upstream requests. If not + set, HTTP 1.1 will be used by default. + type: boolean + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + storageBucket: + type: string + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + targetGroupIdsRefs: + description: References to TargetGroup to populate targetGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + targetGroupIdsSelector: + description: Selector for a list of TargetGroup to populate + targetGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this backend group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the Backend Group. + type: string + sessionAffinity: + description: Session affinity mode determines how incoming requests + are grouped into one session. Structure is documented below. + items: + properties: + connection: + description: |- + Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. + IP address affinity + items: + properties: + sourceIp: + description: |- + Source IP address to use with affinity. + Use source IP address + type: boolean + type: object + type: array + cookie: + description: |- + Requests with the same cookie value and the specified cookie name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + Cookie affinity + items: + properties: + name: + description: |- + Name of the HTTP cookie.
+ Name of the HTTP cookie + type: string + ttl: + description: |- + TTL for the cookie (if not set, session cookie will be used) + TTL for the cookie (if not set, session cookie will be used) + type: string + type: object + type: array + header: + description: |- + Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + Request header affinity + items: + properties: + headerName: + description: |- + The name of the request header that will be used with affinity. + The name of the request header that will be used + type: string + type: object + type: array + type: object + type: array + streamBackend: + description: Stream backend specification that will be used by + the ALB Backend Group. Structure is documented below. + items: + properties: + enableProxyProtocol: + type: boolean + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. 
+ items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + targetGroupIdsRefs: + description: References to TargetGroup to populate targetGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + targetGroupIdsSelector: + description: Selector for a list of TargetGroup to populate + targetGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. 
+ type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: BackendGroupStatus defines the observed state of BackendGroup. + properties: + atProvider: + properties: + createdAt: + description: The backend group creation timestamp. + type: string + description: + description: Description of the backend group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + grpcBackend: + description: Grpc backend specification that will be used by the + ALB Backend Group. Structure is documented below. + items: + properties: + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. 
+ type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. 
+ type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + httpBackend: + description: Http backend specification that will be used by the + ALB Backend Group. Structure is documented below. + items: + properties: + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + http2: + description: Enables HTTP2 for upstream requests. If not + set, HTTP 1.1 will be used by default. + type: boolean + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. 
The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + storageBucket: + type: string + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + id: + description: The ID of the backend group. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this backend group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the Backend Group. + type: string + sessionAffinity: + description: Session affinity mode determines how incoming requests + are grouped into one session. Structure is documented below. + items: + properties: + connection: + description: |- + Requests received from the same IP are combined into a session. Stream backend groups only support session affinity by client IP address. Structure is documented below. + IP address affinity + items: + properties: + sourceIp: + description: |- + Source IP address to use with affinity. + Use source IP address + type: boolean + type: object + type: array + cookie: + description: |- + Requests with the same cookie value and the specified cookie name are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + Cookie affinity + items: + properties: + name: + description: |- + Name of the HTTP cookie. + Name of the HTTP cookie + type: string + ttl: + description: |- + TTL for the cookie (if not set, session cookie will be used) + TTL for the cookie (if not set, session cookie will be used) + type: string + type: object + type: array + header: + description: |- + Requests with the same value of the specified HTTP header, such as with user authentication data, are combined into a session. Allowed only for HTTP and gRPC backend groups. Structure is documented below. + Request header affinity + items: + properties: + headerName: + description: |- + The name of the request header that will be used with affinity.
+ The name of the request header that will be used + type: string + type: object + type: array + type: object + type: array + streamBackend: + description: Stream backend specification that will be used by + the ALB Backend Group. Structure is documented below. + items: + properties: + enableProxyProtocol: + type: boolean + healthcheck: + description: Healthcheck specification that will be used + by this backend. Structure is documented below. + items: + properties: + grpcHealthcheck: + description: Grpc Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + serviceName: + description: Service name for grpc.health.v1.HealthCheckRequest + message. + type: string + type: object + type: array + healthcheckPort: + description: Optional alternative port for health + checking. + type: number + healthyThreshold: + description: Number of consecutive successful health + checks required to promote endpoint into the healthy + state. 0 means 1. Note that during startup, only + a single successful health check is required to + mark a host healthy. + type: number + httpHealthcheck: + description: Http Healthcheck specification that will + be used by this healthcheck. Structure is documented + below. + items: + properties: + host: + description: '"Host" HTTP header value.' + type: string + http2: + description: If set, health checks will use + HTTP2. + type: boolean + path: + description: HTTP path. + type: string + type: object + type: array + interval: + description: Interval between health checks. + type: string + intervalJitterPercent: + description: An optional jitter amount as a percentage + of interval. If specified, during every interval + value of (interval_ms * interval_jitter_percent + / 100) will be added to the wait time. + type: number + streamHealthcheck: + description: Stream Healthcheck specification that + will be used by this healthcheck. Structure is documented + below. + items: + properties: + receive: + description: Data that must be contained in + the messages received from targets for a successful + health check. If not specified, no messages + are expected from targets, and those that + are received are not checked. + type: string + send: + description: Message sent to targets during + TCP data transfer. If not specified, no data + is sent to the target. + type: string + type: object + type: array + timeout: + description: Time to wait for a health check response. + type: string + unhealthyThreshold: + description: Number of consecutive failed health checks + required to demote endpoint into the unhealthy state. + 0 means 1. Note that for HTTP health checks, a single + 503 immediately makes endpoint unhealthy. + type: number + type: object + type: array + loadBalancingConfig: + description: Load Balancing Config specification that will + be used by this backend. Structure is documented below. + items: + properties: + localityAwareRoutingPercent: + description: Percent of traffic to be sent to the + same availability zone. The rest will be equally + divided between other zones. + type: number + mode: + description: 'Load balancing mode for the backend. + Possible values: "ROUND_ROBIN", "RANDOM", "LEAST_REQUEST", + "MAGLEV_HASH".' + type: string + panicThreshold: + description: If percentage of healthy hosts in the + backend is lower than panic_threshold, traffic will + be routed to all backends no matter what the health + status is. This helps to avoid healthy backends + overloading when everything is bad. 
Zero means no + panic threshold. + type: number + strictLocality: + description: If set, will route requests only to the + same availability zone. Balancer won't know about + endpoints in other zones. + type: boolean + type: object + type: array + name: + description: Name of the backend. + type: string + port: + description: Port for incoming traffic. + type: number + targetGroupIds: + description: References target groups for the backend. + items: + type: string + type: array + tls: + description: Tls specification that will be used by this + backend. Structure is documented below. + items: + properties: + sni: + description: SNI string for TLS connections. + type: string + validationContext: + items: + properties: + trustedCaBytes: + description: PEM-encoded trusted CA certificate + chain. + type: string + trustedCaId: + description: Trusted CA certificate ID in the + Certificate Manager. + type: string + type: object + type: array + type: object + type: array + weight: + description: Weight of the backend. Traffic will be split + between backends of the same BackendGroup according to + their weights. + type: number + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
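For orientation, a minimal BackendGroup manifest exercising the httpBackend, healthcheck, and sessionAffinity fields described by this schema could look as follows. This is an illustrative sketch, not generator output: all names are hypothetical, and the referenced Folder and TargetGroup objects are assumed to exist as managed resources of this provider.

apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: BackendGroup
metadata:
  name: example-backend-group
spec:
  forProvider:
    name: example-backend-group
    folderIdRef:
      name: example-folder              # hypothetical Folder managed resource
    httpBackend:
      - name: primary
        port: 80
        weight: 100
        http2: true
        targetGroupIdsRefs:
          - name: example-target-group  # hypothetical TargetGroup managed resource
        healthcheck:
          - interval: 2s
            timeout: 1s
            healthyThreshold: 2
            unhealthyThreshold: 3
            httpHealthcheck:
              - path: /healthz
        loadBalancingConfig:
          - mode: ROUND_ROBIN
            panicThreshold: 50
    sessionAffinity:
      - cookie:
          - name: session-id            # HTTP cookie used to pin a session
            ttl: 60s
  providerConfigRef:
    name: default

Likewise, for the HTTPRouter CRD added by the next file in this diff, a minimal manifest might look like the sketch below; the rbac action value is assumed from the underlying Terraform provider, since the schema leaves it as a free-form string.

apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: HTTPRouter
metadata:
  name: example-router
spec:
  forProvider:
    name: example-router
    folderIdRef:
      name: example-folder              # hypothetical Folder managed resource
    labels:
      env: dev
    routeOptions:
      - rbac:
          - action: ALLOW               # assumed value; not enumerated in the schema
            principals:
              - andPrincipals:
                  - remoteIp: 10.0.0.0/8
  providerConfigRef:
    name: default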
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/alb.yandex-cloud.upjet.crossplane.io_httprouters.yaml b/package/crds/alb.yandex-cloud.upjet.crossplane.io_httprouters.yaml new file mode 100644 index 0000000..61055ef --- /dev/null +++ b/package/crds/alb.yandex-cloud.upjet.crossplane.io_httprouters.yaml @@ -0,0 +1,689 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: httprouters.alb.yandex-cloud.upjet.crossplane.io +spec: + group: alb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: HTTPRouter + listKind: HTTPRouterList + plural: httprouters + singular: httprouter + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPRouter is the Schema for the HTTPRouters API. The HTTP router + defines the routing rules for HTTP requests to backend groups. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HTTPRouterSpec defines the desired state of HTTPRouter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of the HTTP Router. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this HTTP Router. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the HTTP Router. Provided by the client when + the HTTP Router is created. + type: string + routeOptions: + items: + properties: + rbac: + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: Name of the HTTP header + to match. + type: string + value: + items: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: The ID of the security profile. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation.
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of the HTTP Router. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this HTTP Router. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the HTTP Router. Provided by the client when + the HTTP Router is created. + type: string + routeOptions: + items: + properties: + rbac: + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: Name of the HTTP header + to match.
+ type: string + value: + items: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: The ID of the HTTP Router. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: HTTPRouterStatus defines the observed state of HTTPRouter. + properties: + atProvider: + properties: + createdAt: + description: The HTTP Router creation timestamp. + type: string + description: + description: An optional description of the HTTP Router. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + id: + description: The ID of the HTTP Router. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this HTTP Router. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the HTTP Router. Provided by the client when + the HTTP Router is created. + type: string + routeOptions: + items: + properties: + rbac: + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: Name of the HTTP Router. + Provided by the client when the + HTTP Router is created. 
+ type: string + value: + items: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: The ID of the HTTP Router. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/alb.yandex-cloud.upjet.crossplane.io_loadbalancers.yaml b/package/crds/alb.yandex-cloud.upjet.crossplane.io_loadbalancers.yaml new file mode 100644 index 0000000..7cb37e2 --- /dev/null +++ b/package/crds/alb.yandex-cloud.upjet.crossplane.io_loadbalancers.yaml @@ -0,0 +1,2379 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: loadbalancers.alb.yandex-cloud.upjet.crossplane.io +spec: + group: alb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: LoadBalancer + listKind: LoadBalancerList + plural: loadbalancers + singular: loadbalancer + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: LoadBalancer is the Schema for the LoadBalancers API. A Load + Balancer is used for receiving incoming traffic and transmitting it to the + backend endpoints specified in the ALB Target Groups. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LoadBalancerSpec defines the desired state of LoadBalancer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocationPolicy: + description: Allocation zones for the Load Balancer instance. + The structure is documented below. + items: + properties: + location: + description: Unique set of locations. The structure is documented + below. + items: + properties: + disableTraffic: + description: If set, will disable all L7 instances + in the zone for request handling. + type: boolean + subnetId: + description: ID of the subnet that location is located + at. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zoneId: + description: ID of the zone that location is located + at. + type: string + type: object + type: array + type: object + type: array + description: + description: An optional description of the Load Balancer. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this Load Balancer. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + listener: + description: List of listeners for the Load Balancer. 
The structure + is documented below. + items: + properties: + endpoint: + description: Network endpoints (addresses and ports) of + the listener. The structure is documented below. + items: + properties: + address: + description: Provided by the client or computed automatically. + items: + properties: + externalIpv4Address: + description: External IPv4 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + externalIpv6Address: + description: External IPv6 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + internalIpv4Address: + description: Internal IPv4 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + subnetId: + description: Provided by the client or + computed automatically. + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + ports: + description: One or more ports to listen on. + items: + type: number + type: array + type: object + type: array + http: + description: HTTP listener resource. The structure is documented + below. 
+ items: + properties: + handler: + description: Stream handler that sets plaintext Stream + backend group. The structure is documented below. + items: + properties: + allowHttp10: + description: If set, will enable only HTTP1 + protocol with HTTP1.0 support. + type: boolean + http2Options: + description: If set, will enable HTTP2 protocol + for the handler. The structure is documented + below. + items: + properties: + maxConcurrentStreams: + description: Maximum number of concurrent + streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + httpRouterIdRef: + description: Reference to a HTTPRouter to populate + httpRouterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + httpRouterIdSelector: + description: Selector for a HTTPRouter to populate + httpRouterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rewriteRequestId: + description: When unset, will preserve the incoming + x-request-id header, otherwise would rewrite + it with a new value. + type: boolean + type: object + type: array + redirects: + description: Shortcut for adding http -> https redirects. + The structure is documented below. + items: + properties: + httpToHttps: + description: If set redirects all unencrypted + HTTP requests to the same URI with scheme + changed to https. + type: boolean + type: object + type: array + type: object + type: array + name: + description: name of the listener. + type: string + stream: + description: Stream listener resource. The structure is + documented below. 
+ items: + properties: + handler: + description: Stream handler that sets plaintext Stream + backend group. The structure is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + tls: + description: TLS listener resource. The structure is documented + below. + items: + properties: + defaultHandler: + description: TLS handler resource. The structure is + documented below. + items: + properties: + certificateIds: + description: Certificate IDs in the Certificate + Manager. Multiple TLS certificates can be + associated with the same context to allow + both RSA and ECDSA certificates. Only the + first certificate of each type will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + httpHandler: + description: HTTP handler resource. The structure + is documented below. + items: + properties: + allowHttp10: + description: If set, will enable only + HTTP1 protocol with HTTP1.0 support. + type: boolean + http2Options: + description: If set, will enable HTTP2 + protocol for the handler. The structure + is documented below. + items: + properties: + maxConcurrentStreams: + description: Maximum number of concurrent + streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve + the incoming x-request-id header, otherwise + would rewrite it with a new value. + type: boolean + type: object + type: array + streamHandler: + description: Stream handler resource. The structure + is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + sniHandler: + description: SNI match resource. The structure is + documented below. + items: + properties: + handler: + description: Stream handler that sets plaintext + Stream backend group. The structure is documented + below. + items: + properties: + certificateIds: + description: Certificate IDs in the Certificate + Manager. Multiple TLS certificates can + be associated with the same context + to allow both RSA and ECDSA certificates. + Only the first certificate of each type + will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + httpHandler: + description: HTTP handler resource. The + structure is documented below. + items: + properties: + allowHttp10: + description: If set, will enable + only HTTP1 protocol with HTTP1.0 + support. + type: boolean + http2Options: + description: If set, will enable + HTTP2 protocol for the handler. + The structure is documented below. + items: + properties: + maxConcurrentStreams: + description: Maximum number + of concurrent streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve + the incoming x-request-id header, + otherwise would rewrite it with + a new value. + type: boolean + type: object + type: array + streamHandler: + description: Stream handler resource. + The structure is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + name: + description: name of SNI match. + type: string + serverNames: + description: A set of server names. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + type: object + type: array + logOptions: + description: Cloud Logging settings. The structure is documented + below. + items: + properties: + disable: + description: Set to true to disable Cloud Logging for the + balancer + type: boolean + discardRule: + description: List of rules to discard a fraction of logs. + The structure is documented below. + items: + properties: + discardPercent: + type: number + grpcCodes: + description: list of grpc codes by name, e.g, ["NOT_FOUND", + "RESOURCE_EXHAUSTED"] + items: + type: string + type: array + httpCodeIntervals: + description: 5XX or ALL + items: + type: string + type: array + httpCodes: + description: "599" + items: + type: number + type: array + type: object + type: array + logGroupId: + description: Cloud Logging group ID to send logs to. Leave + empty to use the balancer folder default log group. + type: string + type: object + type: array + name: + description: Name of the Load Balancer. Provided by the client + when the Load Balancer is created. + type: string + networkId: + description: ID of the network that the Load Balancer is located + at. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + regionId: + description: ID of the region that the Load Balancer is located + at. 
+                    type: string
+                  securityGroupIds:
+                    description: A list of IDs of security groups attached to the
+                      Load Balancer.
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: set
+                  securityGroupIdsRefs:
+                    description: References to SecurityGroup in vpc to populate securityGroupIds.
+                    items:
+                      description: A Reference to a named object.
+                      properties:
+                        name:
+                          description: Name of the referenced object.
+                          type: string
+                        policy:
+                          description: Policies for referencing.
+                          properties:
+                            resolution:
+                              default: Required
+                              description: |-
+                                Resolution specifies whether resolution of this reference is required.
+                                The default is 'Required', which means the reconcile will fail if the
+                                reference cannot be resolved. 'Optional' means this reference will be
+                                a no-op if it cannot be resolved.
+                              enum:
+                              - Required
+                              - Optional
+                              type: string
+                            resolve:
+                              description: |-
+                                Resolve specifies when this reference should be resolved. The default
+                                is 'IfNotPresent', which will attempt to resolve the reference only when
+                                the corresponding field is not present. Use 'Always' to resolve the
+                                reference on every reconcile.
+                              enum:
+                              - Always
+                              - IfNotPresent
+                              type: string
+                          type: object
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  securityGroupIdsSelector:
+                    description: Selector for a list of SecurityGroup in vpc to populate
+                      securityGroupIds.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching labels
+                          is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    type: object
+                type: object
+              initProvider:
+                description: |-
+                  THIS IS A BETA FIELD. It will be honored
+                  unless the Management Policies feature flag is disabled.
+                  InitProvider holds the same fields as ForProvider, with the exception
+                  of Identifier and other resource reference fields. The fields that are
+                  in InitProvider are merged into ForProvider when the resource is created.
+                  The same fields are also added to the terraform ignore_changes hook, to
+                  avoid updating them after creation. This is useful for fields that are
+                  required on creation, but we do not want to update them after creation,
+                  for example because an external controller is managing them, like an
+                  autoscaler.
+                properties:
+                  allocationPolicy:
+                    description: Allocation zones for the Load Balancer instance.
+                      The structure is documented below.
+                    items:
+                      properties:
+                        location:
+                          description: Unique set of locations. The structure is documented
+                            below.
+                          items:
+                            properties:
+                              disableTraffic:
+                                description: If set, will disable all L7 instances
+                                  in the zone for request handling.
+ type: boolean + subnetId: + description: ID of the subnet that location is located + at. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zoneId: + description: ID of the zone that location is located + at. + type: string + type: object + type: array + type: object + type: array + description: + description: An optional description of the Load Balancer. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this Load Balancer. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + listener: + description: List of listeners for the Load Balancer. The structure + is documented below. + items: + properties: + endpoint: + description: Network endpoints (addresses and ports) of + the listener. The structure is documented below. + items: + properties: + address: + description: Provided by the client or computed automatically. + items: + properties: + externalIpv4Address: + description: External IPv4 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + externalIpv6Address: + description: External IPv6 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + internalIpv4Address: + description: Internal IPv4 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + subnetId: + description: Provided by the client or + computed automatically. + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + ports: + description: One or more ports to listen on. + items: + type: number + type: array + type: object + type: array + http: + description: HTTP listener resource. The structure is documented + below. + items: + properties: + handler: + description: Stream handler that sets plaintext Stream + backend group. The structure is documented below. + items: + properties: + allowHttp10: + description: If set, will enable only HTTP1 + protocol with HTTP1.0 support. + type: boolean + http2Options: + description: If set, will enable HTTP2 protocol + for the handler. The structure is documented + below. + items: + properties: + maxConcurrentStreams: + description: Maximum number of concurrent + streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + httpRouterIdRef: + description: Reference to a HTTPRouter to populate + httpRouterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + httpRouterIdSelector: + description: Selector for a HTTPRouter to populate + httpRouterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rewriteRequestId: + description: When unset, will preserve the incoming + x-request-id header, otherwise would rewrite + it with a new value. + type: boolean + type: object + type: array + redirects: + description: Shortcut for adding http -> https redirects. + The structure is documented below. + items: + properties: + httpToHttps: + description: If set redirects all unencrypted + HTTP requests to the same URI with scheme + changed to https. + type: boolean + type: object + type: array + type: object + type: array + name: + description: name of the listener. + type: string + stream: + description: Stream listener resource. The structure is + documented below. + items: + properties: + handler: + description: Stream handler that sets plaintext Stream + backend group. The structure is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + tls: + description: TLS listener resource. The structure is documented + below. + items: + properties: + defaultHandler: + description: TLS handler resource. The structure is + documented below. + items: + properties: + certificateIds: + description: Certificate IDs in the Certificate + Manager. Multiple TLS certificates can be + associated with the same context to allow + both RSA and ECDSA certificates. Only the + first certificate of each type will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + httpHandler: + description: HTTP handler resource. The structure + is documented below. + items: + properties: + allowHttp10: + description: If set, will enable only + HTTP1 protocol with HTTP1.0 support. + type: boolean + http2Options: + description: If set, will enable HTTP2 + protocol for the handler. The structure + is documented below. + items: + properties: + maxConcurrentStreams: + description: Maximum number of concurrent + streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve + the incoming x-request-id header, otherwise + would rewrite it with a new value. + type: boolean + type: object + type: array + streamHandler: + description: Stream handler resource. The structure + is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + sniHandler: + description: SNI match resource. The structure is + documented below. + items: + properties: + handler: + description: Stream handler that sets plaintext + Stream backend group. The structure is documented + below. 
+ items: + properties: + certificateIds: + description: Certificate IDs in the Certificate + Manager. Multiple TLS certificates can + be associated with the same context + to allow both RSA and ECDSA certificates. + Only the first certificate of each type + will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + httpHandler: + description: HTTP handler resource. The + structure is documented below. + items: + properties: + allowHttp10: + description: If set, will enable + only HTTP1 protocol with HTTP1.0 + support. + type: boolean + http2Options: + description: If set, will enable + HTTP2 protocol for the handler. + The structure is documented below. + items: + properties: + maxConcurrentStreams: + description: Maximum number + of concurrent streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve + the incoming x-request-id header, + otherwise would rewrite it with + a new value. + type: boolean + type: object + type: array + streamHandler: + description: Stream handler resource. + The structure is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + name: + description: name of SNI match. + type: string + serverNames: + description: A set of server names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + type: object + type: array + logOptions: + description: Cloud Logging settings. The structure is documented + below. + items: + properties: + disable: + description: Set to true to disable Cloud Logging for the + balancer + type: boolean + discardRule: + description: List of rules to discard a fraction of logs. + The structure is documented below. + items: + properties: + discardPercent: + type: number + grpcCodes: + description: list of grpc codes by name, e.g, ["NOT_FOUND", + "RESOURCE_EXHAUSTED"] + items: + type: string + type: array + httpCodeIntervals: + description: 5XX or ALL + items: + type: string + type: array + httpCodes: + description: "599" + items: + type: number + type: array + type: object + type: array + logGroupId: + description: Cloud Logging group ID to send logs to. Leave + empty to use the balancer folder default log group. + type: string + type: object + type: array + name: + description: Name of the Load Balancer. Provided by the client + when the Load Balancer is created. + type: string + networkId: + description: ID of the network that the Load Balancer is located + at. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  networkIdSelector:
+                    description: Selector for a Network in vpc to populate networkId.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching labels
+                          is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    type: object
+                  regionId:
+                    description: ID of the region that the Load Balancer is located
+                      at.
+                    type: string
+                  securityGroupIds:
+                    description: A list of IDs of security groups attached to the
+                      Load Balancer.
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: set
+                  securityGroupIdsRefs:
+                    description: References to SecurityGroup in vpc to populate securityGroupIds.
+                    items:
+                      description: A Reference to a named object.
+                      properties:
+                        name:
+                          description: Name of the referenced object.
+                          type: string
+                        policy:
+                          description: Policies for referencing.
+                          properties:
+                            resolution:
+                              default: Required
+                              description: |-
+                                Resolution specifies whether resolution of this reference is required.
+                                The default is 'Required', which means the reconcile will fail if the
+                                reference cannot be resolved. 'Optional' means this reference will be
+                                a no-op if it cannot be resolved.
+                              enum:
+                              - Required
+                              - Optional
+                              type: string
+                            resolve:
+                              description: |-
+                                Resolve specifies when this reference should be resolved. The default
+                                is 'IfNotPresent', which will attempt to resolve the reference only when
+                                the corresponding field is not present. Use 'Always' to resolve the
+                                reference on every reconcile.
+                              enum:
+                              - Always
+                              - IfNotPresent
+                              type: string
+                          type: object
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  securityGroupIdsSelector:
+                    description: Selector for a list of SecurityGroup in vpc to populate
+                      securityGroupIds.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching labels
+                          is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.allocationPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.allocationPolicy) + || (has(self.initProvider) && has(self.initProvider.allocationPolicy))' + status: + description: LoadBalancerStatus defines the observed state of LoadBalancer. + properties: + atProvider: + properties: + allocationPolicy: + description: Allocation zones for the Load Balancer instance. + The structure is documented below. + items: + properties: + location: + description: Unique set of locations. The structure is documented + below. + items: + properties: + disableTraffic: + description: If set, will disable all L7 instances + in the zone for request handling. + type: boolean + subnetId: + description: ID of the subnet that location is located + at. + type: string + zoneId: + description: ID of the zone that location is located + at. + type: string + type: object + type: array + type: object + type: array + createdAt: + description: The Load Balancer creation timestamp. 
+ type: string + description: + description: An optional description of the Load Balancer. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + id: + description: The ID of the Load Balancer. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this Load Balancer. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + listener: + description: List of listeners for the Load Balancer. The structure + is documented below. + items: + properties: + endpoint: + description: Network endpoints (addresses and ports) of + the listener. The structure is documented below. + items: + properties: + address: + description: Provided by the client or computed automatically. + items: + properties: + externalIpv4Address: + description: External IPv4 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + externalIpv6Address: + description: External IPv6 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + internalIpv4Address: + description: Internal IPv4 address. The structure + is documented below. + items: + properties: + address: + description: Provided by the client or + computed automatically. + type: string + subnetId: + description: Provided by the client or + computed automatically. + type: string + type: object + type: array + type: object + type: array + ports: + description: One or more ports to listen on. + items: + type: number + type: array + type: object + type: array + http: + description: HTTP listener resource. The structure is documented + below. + items: + properties: + handler: + description: Stream handler that sets plaintext Stream + backend group. The structure is documented below. + items: + properties: + allowHttp10: + description: If set, will enable only HTTP1 + protocol with HTTP1.0 support. + type: boolean + http2Options: + description: If set, will enable HTTP2 protocol + for the handler. The structure is documented + below. + items: + properties: + maxConcurrentStreams: + description: Maximum number of concurrent + streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve the incoming + x-request-id header, otherwise would rewrite + it with a new value. + type: boolean + type: object + type: array + redirects: + description: Shortcut for adding http -> https redirects. + The structure is documented below. + items: + properties: + httpToHttps: + description: If set redirects all unencrypted + HTTP requests to the same URI with scheme + changed to https. + type: boolean + type: object + type: array + type: object + type: array + name: + description: name of the listener. + type: string + stream: + description: Stream listener resource. The structure is + documented below. + items: + properties: + handler: + description: Stream handler that sets plaintext Stream + backend group. The structure is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + tls: + description: TLS listener resource. 
The structure is documented + below. + items: + properties: + defaultHandler: + description: TLS handler resource. The structure is + documented below. + items: + properties: + certificateIds: + description: Certificate IDs in the Certificate + Manager. Multiple TLS certificates can be + associated with the same context to allow + both RSA and ECDSA certificates. Only the + first certificate of each type will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + httpHandler: + description: HTTP handler resource. The structure + is documented below. + items: + properties: + allowHttp10: + description: If set, will enable only + HTTP1 protocol with HTTP1.0 support. + type: boolean + http2Options: + description: If set, will enable HTTP2 + protocol for the handler. The structure + is documented below. + items: + properties: + maxConcurrentStreams: + description: Maximum number of concurrent + streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve + the incoming x-request-id header, otherwise + would rewrite it with a new value. + type: boolean + type: object + type: array + streamHandler: + description: Stream handler resource. The structure + is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + sniHandler: + description: SNI match resource. The structure is + documented below. + items: + properties: + handler: + description: Stream handler that sets plaintext + Stream backend group. The structure is documented + below. + items: + properties: + certificateIds: + description: Certificate IDs in the Certificate + Manager. Multiple TLS certificates can + be associated with the same context + to allow both RSA and ECDSA certificates. + Only the first certificate of each type + will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + httpHandler: + description: HTTP handler resource. The + structure is documented below. + items: + properties: + allowHttp10: + description: If set, will enable + only HTTP1 protocol with HTTP1.0 + support. + type: boolean + http2Options: + description: If set, will enable + HTTP2 protocol for the handler. + The structure is documented below. + items: + properties: + maxConcurrentStreams: + description: Maximum number + of concurrent streams. + type: number + type: object + type: array + httpRouterId: + description: HTTP router id. + type: string + rewriteRequestId: + description: When unset, will preserve + the incoming x-request-id header, + otherwise would rewrite it with + a new value. + type: boolean + type: object + type: array + streamHandler: + description: Stream handler resource. + The structure is documented below. + items: + properties: + backendGroupId: + description: Backend group id. + type: string + type: object + type: array + type: object + type: array + name: + description: name of SNI match. + type: string + serverNames: + description: A set of server names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + type: object + type: array + logGroupId: + description: Cloud log group used by the Load Balancer to store + access logs. + type: string + logOptions: + description: Cloud Logging settings. The structure is documented + below. 
+ items: + properties: + disable: + description: Set to true to disable Cloud Logging for the + balancer + type: boolean + discardRule: + description: List of rules to discard a fraction of logs. + The structure is documented below. + items: + properties: + discardPercent: + type: number + grpcCodes: + description: list of grpc codes by name, e.g, ["NOT_FOUND", + "RESOURCE_EXHAUSTED"] + items: + type: string + type: array + httpCodeIntervals: + description: 5XX or ALL + items: + type: string + type: array + httpCodes: + description: "599" + items: + type: number + type: array + type: object + type: array + logGroupId: + description: Cloud Logging group ID to send logs to. Leave + empty to use the balancer folder default log group. + type: string + type: object + type: array + name: + description: Name of the Load Balancer. Provided by the client + when the Load Balancer is created. + type: string + networkId: + description: ID of the network that the Load Balancer is located + at. + type: string + regionId: + description: ID of the region that the Load Balancer is located + at. + type: string + securityGroupIds: + description: A list of ID's of security groups attached to the + Load Balancer. + items: + type: string + type: array + x-kubernetes-list-type: set + status: + description: Status of the Load Balancer. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
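
For orientation, a minimal LoadBalancer manifest exercising this schema might look like the sketch below. All names, labels, and IDs are hypothetical, and the allocationPolicy/logOptions shapes are assumed to mirror the status schema documented here; note that the CEL rule above requires spec.forProvider.allocationPolicy whenever Create or Update is among the management policies.

apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: LoadBalancer
metadata:
  name: example-alb                     # hypothetical name
spec:
  forProvider:
    networkIdSelector:
      matchLabels:
        demo: "true"                    # selects a managed Network by label
    securityGroupIdsRefs:
      - name: example-sg                # hypothetical SecurityGroup object
    allocationPolicy:                   # required by the CEL rule above
      - location:
          - zoneId: ru-central1-a       # assumed zone name
            subnetId: example-subnet-id # hypothetical subnet ID
    logOptions:
      - discardRule:
          - discardPercent: 90
            httpCodeIntervals:
              - "5XX"
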
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/alb.yandex-cloud.upjet.crossplane.io_targetgroups.yaml b/package/crds/alb.yandex-cloud.upjet.crossplane.io_targetgroups.yaml new file mode 100644 index 0000000..584eab4 --- /dev/null +++ b/package/crds/alb.yandex-cloud.upjet.crossplane.io_targetgroups.yaml @@ -0,0 +1,736 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: targetgroups.alb.yandex-cloud.upjet.crossplane.io +spec: + group: alb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: TargetGroup + listKind: TargetGroupList + plural: targetgroups + singular: targetgroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TargetGroup is the Schema for the TargetGroups API. An application + load balancer distributes the load across cloud resources that are combined + into a target group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TargetGroupSpec defines the desired state of TargetGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of the target group. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
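
Taken together, the spec-level lifecycle fields defined here might be combined as in this short sketch: 'Observe' restricts Crossplane to read-only reconciliation, and 'Orphan' leaves the external resource behind on deletion.

spec:
  deletionPolicy: Orphan        # keep the external resource when the managed resource is deleted
  managementPolicies:
    - Observe                   # read-only: Crossplane will not create, update, or delete
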
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this target group. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the target group. Provided by the client + when the target group is created. + type: string + target: + description: A Target resource. The structure is documented below. + items: + properties: + ipAddress: + description: IP address of the target. + type: string + privateIpv4Address: + type: boolean + subnetId: + description: ID of the subnet that targets are connected + to. All targets in the target group must be connected + to the same subnet within a single availability zone. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
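
As a small sketch of the reference-policy fields above (the Folder name is hypothetical), folder resolution can be made best-effort yet re-evaluated on every reconcile:

folderIdRef:
  name: my-folder               # hypothetical Folder object
  policy:
    resolution: Optional        # do not fail the reconcile if it cannot resolve
    resolve: Always             # re-resolve on every reconcile
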
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of the target group. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this target group. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the target group. Provided by the client + when the target group is created. + type: string + target: + description: A Target resource. The structure is documented below. + items: + properties: + ipAddress: + description: IP address of the target. + type: string + privateIpv4Address: + type: boolean + subnetId: + description: ID of the subnet that targets are connected + to. All targets in the target group must be connected + to the same subnet within a single availability zone. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TargetGroupStatus defines the observed state of TargetGroup. + properties: + atProvider: + properties: + createdAt: + description: The target group creation timestamp. + type: string + description: + description: An optional description of the target group. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + id: + description: The ID of the target group. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this target group. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the target group. Provided by the client + when the target group is created. + type: string + target: + description: A Target resource. The structure is documented below. + items: + properties: + ipAddress: + description: IP address of the target. + type: string + privateIpv4Address: + type: boolean + subnetId: + description: ID of the subnet that targets are connected + to. All targets in the target group must be connected + to the same subnet within a single availability zone. 
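
Pulling the TargetGroup schema together, an illustrative manifest might read as follows; the name, label, address, and Subnet reference are all hypothetical:

apiVersion: alb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: TargetGroup
metadata:
  name: example-tg                      # hypothetical name
spec:
  forProvider:
    labels:
      env: dev
    target:
      - ipAddress: 10.128.0.15          # hypothetical target address
        subnetIdRef:
          name: example-subnet          # hypothetical Subnet object
  providerConfigRef:
    name: default
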
+ type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/alb.yandex-cloud.upjet.crossplane.io_virtualhosts.yaml b/package/crds/alb.yandex-cloud.upjet.crossplane.io_virtualhosts.yaml new file mode 100644 index 0000000..406ade2 --- /dev/null +++ b/package/crds/alb.yandex-cloud.upjet.crossplane.io_virtualhosts.yaml @@ -0,0 +1,1933 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: virtualhosts.alb.yandex-cloud.upjet.crossplane.io +spec: + group: alb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: VirtualHost + listKind: VirtualHostList + plural: virtualhosts + singular: virtualhost + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: VirtualHost is the Schema for the VirtualHosts API. Virtual hosts + combine routes belonging to the same set of domains. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualHostSpec defines the desired state of VirtualHost + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authority: + description: A list of domains (host/authority header) that will + be matched to this virtual host. Wildcard hosts are supported + in the form of '.foo.com' or '-bar.foo.com'. If not specified, + all domains will be matched. + items: + type: string + type: array + x-kubernetes-list-type: set + httpRouterId: + description: The ID of the HTTP router to which the virtual host + belongs. + type: string + httpRouterIdRef: + description: Reference to a HTTPRouter to populate httpRouterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + httpRouterIdSelector: + description: Selector for a HTTPRouter to populate httpRouterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + modifyRequestHeaders: + description: Apply the following modifications to the request + headers. The structure is documented below. + items: + properties: + append: + description: Append string to the header value. + type: string + name: + description: name of the header to modify. + type: string + remove: + description: If set, remove the header. + type: boolean + replace: + description: New value for a header. Header values support + the following formatters. + type: string + type: object + type: array + modifyResponseHeaders: + description: Apply the following modifications to the response + headers. The structure is documented below. + items: + properties: + append: + description: Append string to the header value. + type: string + name: + description: name of the route. + type: string + remove: + description: If set, remove the header. + type: boolean + replace: + description: New value for a header. Header values support + the following formatters. + type: string + type: object + type: array + name: + description: Name of the virtual host. Provided by the client + when the virtual host is created. + type: string + route: + description: A Route resource. Routes are matched in-order. Be + careful when adding them to the end. For instance, having http + '/' match first makes all other routes unused. The structure + is documented below. + items: + properties: + grpcRoute: + description: GRPC route resource. The structure is documented + below. + items: + properties: + grpcMatch: + description: Checks "/" prefix by default. The structure + is documented below. + items: + properties: + fqmn: + description: If not set, all services/methods + are assumed. The structure is documented below. + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + grpcRouteAction: + description: GRPC route action resource. The structure + is documented below. + items: + properties: + autoHostRewrite: + description: If set, will automatically rewrite + host. + type: boolean + backendGroupId: + description: Backend group to route requests. + type: string + backendGroupIdRef: + description: Reference to a BackendGroup to + populate backendGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
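
A brief sketch of the header-modification blocks above; the header names and values are invented for the example:

modifyRequestHeaders:
  - name: X-Environment         # invented header name
    replace: staging            # overwrite the header value
modifyResponseHeaders:
  - name: Server
    remove: true                # strip the header from responses
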
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + backendGroupIdSelector: + description: Selector for a BackendGroup to + populate backendGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostRewrite: + description: Host rewrite specifier. + type: string + idleTimeout: + description: Specifies the idle timeout (time + without any data transfer for the active request) + for the route. It is useful for streaming + scenarios (i.e. long-polling, server-sent + events) - one should set idle_timeout to something + meaningful and timeout to the maximum time + the stream is allowed to be alive. If not + specified, there is no per-route idle timeout. + type: string + maxTimeout: + description: Lower timeout may be specified + by the client (using grpc-timeout header). + If not set, default is 60 seconds. + type: string + type: object + type: array + grpcStatusResponseAction: + description: GRPC status response action resource. + The structure is documented below. + items: + properties: + status: + description: 'The status of the response. Supported + values are: ok, invalid_argument, not_found, + permission_denied, unauthenticated, unimplemented, + internal, unavailable.' + type: string + type: object + type: array + type: object + type: array + httpRoute: + description: HTTP route resource. The structure is documented + below. + items: + properties: + directResponseAction: + description: Direct response action resource. The + structure is documented below. + items: + properties: + body: + description: Response body text. + type: string + status: + description: 'The status of the response. Supported + values are: ok, invalid_argument, not_found, + permission_denied, unauthenticated, unimplemented, + internal, unavailable.' + type: number + type: object + type: array + httpMatch: + description: Checks "/" prefix by default. The structure + is documented below. + items: + properties: + httpMethod: + description: List of methods (strings). + items: + type: string + type: array + x-kubernetes-list-type: set + path: + description: If not set, '/' is assumed. The + structure is documented below. + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + httpRouteAction: + description: HTTP route action resource.
The structure + is documented below. + items: + properties: + autoHostRewrite: + description: If set, will automatically rewrite + host. + type: boolean + backendGroupId: + description: Backend group to route requests. + type: string + backendGroupIdRef: + description: Reference to a BackendGroup to + populate backendGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + backendGroupIdSelector: + description: Selector for a BackendGroup to + populate backendGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostRewrite: + description: Host rewrite specifier. + type: string + idleTimeout: + description: Specifies the idle timeout (time + without any data transfer for the active request) + for the route. It is useful for streaming + scenarios (i.e. long-polling, server-sent + events) - one should set idle_timeout to something + meaningful and timeout to the maximum time + the stream is allowed to be alive. If not + specified, there is no per-route idle timeout. + type: string + prefixRewrite: + description: If not empty, matched path prefix + will be replaced by this value. + type: string + timeout: + description: Specifies the request timeout (overall + time request processing is allowed to take) + for the route. If not set, default is 60 seconds. + type: string + upgradeTypes: + description: List of upgrade types. Only specified + upgrade types will be allowed. For example, + "websocket". + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + redirectAction: + description: Redirect action resource. The structure + is documented below. 
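
A hedged sketch of a route entry using the httpMatch and httpRouteAction fields above; the route name, prefix, and BackendGroup name are hypothetical, and the duration strings assume the provider's '60s'-style format:

route:
  - name: api                   # hypothetical route name
    httpRoute:
      - httpMatch:
          - path:
              - prefix: /api    # match every path under /api
        httpRouteAction:
          - backendGroupIdRef:
              name: example-backend-group   # hypothetical BackendGroup object
            timeout: 60s        # overall request timeout (assumed duration format)
            upgradeTypes:
              - websocket       # allow WebSocket upgrades on this route
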
+ items: + properties: + removeQuery: + type: boolean + replaceHost: + description: Replaces hostname. + type: string + replacePath: + description: Replace path. + type: string + replacePort: + description: Replaces port. + type: number + replacePrefix: + description: 'Replace only matched prefix. Example: + match:{ prefix_match: "/some" } redirect: + { replace_prefix: "/other" } will redirect + "/something" to "/otherthing".' + type: string + replaceScheme: + description: Replaces scheme. If the original + scheme is http or https, will also remove + the 80 or 443 port, if present. + type: string + responseCode: + description: 'The HTTP status code to use in + the redirect response. Supported values are: + moved_permanently, found, see_other, temporary_redirect, + permanent_redirect.' + type: string + type: object + type: array + type: object + type: array + name: + description: name of the route. + type: string + routeOptions: + description: Route options for the virtual host. The structure + is documented below. + items: + properties: + rbac: + description: RBAC configuration. + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: name of the route. + type: string + value: + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: SWS profile ID. + type: string + type: object + type: array + type: object + type: array + routeOptions: + description: Route options for the virtual host. The structure + is documented below. + items: + properties: + rbac: + description: RBAC configuration. + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: name of the route. + type: string + value: + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: SWS profile ID. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authority: + description: A list of domains (host/authority header) that will + be matched to this virtual host. 
Wildcard hosts are supported + in the form of '.foo.com' or '-bar.foo.com'. If not specified, + all domains will be matched. + items: + type: string + type: array + x-kubernetes-list-type: set + httpRouterId: + description: The ID of the HTTP router to which the virtual host + belongs. + type: string + httpRouterIdRef: + description: Reference to a HTTPRouter to populate httpRouterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + httpRouterIdSelector: + description: Selector for a HTTPRouter to populate httpRouterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + modifyRequestHeaders: + description: Apply the following modifications to the request + headers. The structure is documented below. + items: + properties: + append: + description: Append string to the header value. + type: string + name: + description: name of the header to modify. + type: string + remove: + description: If set, remove the header. + type: boolean + replace: + description: New value for a header. Header values support + the following formatters. + type: string + type: object + type: array + modifyResponseHeaders: + description: Apply the following modifications to the response + headers. The structure is documented below. + items: + properties: + append: + description: Append string to the header value. + type: string + name: + description: name of the route. + type: string + remove: + description: If set, remove the header. + type: boolean + replace: + description: New value for a header. Header values support + the following formatters. + type: string + type: object + type: array + name: + description: Name of the virtual host. 
Provided by the client + when the virtual host is created. + type: string + route: + description: A Route resource. Routes are matched in-order. Be + careful when adding them to the end. For instance, having http + '/' match first makes all other routes unused. The structure + is documented below. + items: + properties: + grpcRoute: + description: GRPC route resource. The structure is documented + below. + items: + properties: + grpcMatch: + description: Checks "/" prefix by default. The structure + is documented below. + items: + properties: + fqmn: + description: If not set, all services/methods + are assumed. The structure is documented below. + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + grpcRouteAction: + description: GRPC route action resource. The structure + is documented below. + items: + properties: + autoHostRewrite: + description: If set, will automatically rewrite + host. + type: boolean + backendGroupId: + description: Backend group to route requests. + type: string + backendGroupIdRef: + description: Reference to a BackendGroup to + populate backendGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + backendGroupIdSelector: + description: Selector for a BackendGroup to + populate backendGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostRewrite: + description: Host rewrite specifier. 
+ type: string
+ idleTimeout:
+ description: Specifies the idle timeout (time
+ without any data transfer for the active request)
+ for the route. It is useful for streaming
+ scenarios (i.e. long-polling, server-sent
+ events) - one should set idle_timeout to something
+ meaningful and timeout to the maximum time
+ the stream is allowed to be alive. If not
+ specified, there is no per-route idle timeout.
+ type: string
+ maxTimeout:
+ description: Lower timeout may be specified
+ by the client (using grpc-timeout header).
+ If not set, default is 60 seconds.
+ type: string
+ type: object
+ type: array
+ grpcStatusResponseAction:
+ description: GRPC status response action resource.
+ The structure is documented below.
+ items:
+ properties:
+ status:
+ description: 'The status of the response. Supported
+ values are: ok, invalid_argument, not_found,
+ permission_denied, unauthenticated, unimplemented,
+ internal, unavailable.'
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ httpRoute:
+ description: HTTP route resource. The structure is documented
+ below.
+ items:
+ properties:
+ directResponseAction:
+ description: Direct response action resource. The
+ structure is documented below.
+ items:
+ properties:
+ body:
+ description: Response body text.
+ type: string
+ status:
+ description: 'The status of the response. Supported
+ values are: ok, invalid_argument, not_found,
+ permission_denied, unauthenticated, unimplemented,
+ internal, unavailable.'
+ type: number
+ type: object
+ type: array
+ httpMatch:
+ description: Checks "/" prefix by default. The structure
+ is documented below.
+ items:
+ properties:
+ httpMethod:
+ description: List of methods (strings).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ path:
+ description: If not set, '/' is assumed. The
+ structure is documented below.
+ items:
+ properties:
+ exact:
+ description: Match exactly.
+ type: string
+ prefix:
+ description: Match prefix.
+ type: string
+ regex:
+ description: Match regex.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ httpRouteAction:
+ description: HTTP route action resource. The structure
+ is documented below.
+ items:
+ properties:
+ autoHostRewrite:
+ description: If set, will automatically rewrite
+ host.
+ type: boolean
+ backendGroupId:
+ description: Backend group to route requests.
+ type: string
+ backendGroupIdRef:
+ description: Reference to a BackendGroup to
+ populate backendGroupId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ backendGroupIdSelector:
+ description: Selector for a BackendGroup to
+ populate backendGroupId.
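+ # Illustrative sketch, not generator output: one way the httpMatch and
+ # httpRouteAction fields above might be used in a VirtualHost manifest.
+ # The backend group ID is a placeholder.
+ #   route:
+ #     - name: api-route
+ #       httpRoute:
+ #         - httpMatch:
+ #             - path:
+ #                 - prefix: /api
+ #           httpRouteAction:
+ #             - backendGroupId: <backend-group-id>
+ #               timeout: 30s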
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostRewrite: + description: Host rewrite specifier. + type: string + idleTimeout: + description: Specifies the idle timeout (time + without any data transfer for the active request) + for the route. It is useful for streaming + scenarios (i.e. long-polling, server-sent + events) - one should set idle_timeout to something + meaningful and timeout to the maximum time + the stream is allowed to be alive. If not + specified, there is no per-route idle timeout. + type: string + prefixRewrite: + description: If not empty, matched path prefix + will be replaced by this value. + type: string + timeout: + description: Specifies the request timeout (overall + time request processing is allowed to take) + for the route. If not set, default is 60 seconds. + type: string + upgradeTypes: + description: List of upgrade types. Only specified + upgrade types will be allowed. For example, + "websocket". + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + redirectAction: + description: Redirect action resource. The structure + is documented below. + items: + properties: + removeQuery: + type: boolean + replaceHost: + description: Replaces hostname. + type: string + replacePath: + description: Replace path. + type: string + replacePort: + description: Replaces port. + type: number + replacePrefix: + description: 'Replace only matched prefix. Example: + match:{ prefix_match: "/some" } redirect: + { replace_prefix: "/other" } will redirect + "/something" to "/otherthing".' + type: string + replaceScheme: + description: Replaces scheme. If the original + scheme is http or https, will also remove + the 80 or 443 port, if present. + type: string + responseCode: + description: 'The HTTP status code to use in + the redirect response. Supported values are: + moved_permanently, found, see_other, temporary_redirect, + permanent_redirect.' + type: string + type: object + type: array + type: object + type: array + name: + description: name of the route. + type: string + routeOptions: + description: Route options for the virtual host. The structure + is documented below. + items: + properties: + rbac: + description: RBAC configuration. + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: name of the route. 
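+ # Illustrative sketch, not generator output: a redirect mirroring the
+ # replace_prefix example quoted in the description above.
+ #   httpRoute:
+ #     - httpMatch:
+ #         - path:
+ #             - prefix: /some
+ #       redirectAction:
+ #         - replacePrefix: /other
+ #           responseCode: moved_permanently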
+ type: string + value: + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: SWS profile ID. + type: string + type: object + type: array + type: object + type: array + routeOptions: + description: Route options for the virtual host. The structure + is documented below. + items: + properties: + rbac: + description: RBAC configuration. + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: name of the route. + type: string + value: + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: SWS profile ID. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
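+ # Illustrative sketch, not generator output: writing connection details to a
+ # namespaced Secret; both names are placeholders.
+ #   writeConnectionSecretToRef:
+ #     name: virtualhost-conn
+ #     namespace: crossplane-system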
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ required:
+ - forProvider
+ type: object
+ x-kubernetes-validations:
+ - message: spec.forProvider.name is a required parameter
+ rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+ || ''Update'' in self.managementPolicies) || has(self.forProvider.name)
+ || (has(self.initProvider) && has(self.initProvider.name))'
+ status:
+ description: VirtualHostStatus defines the observed state of VirtualHost.
+ properties:
+ atProvider:
+ properties:
+ authority:
+ description: A list of domains (host/authority header) that will
+ be matched to this virtual host. Wildcard hosts are supported
+ in the form of '*.foo.com' or '*-bar.foo.com'. If not specified,
+ all domains will be matched.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ httpRouterId:
+ description: The ID of the HTTP router to which the virtual host
+ belongs.
+ type: string
+ id:
+ description: The ID of the virtual host.
+ type: string
+ modifyRequestHeaders:
+ description: Apply the following modifications to the request
+ headers. The structure is documented below.
+ items:
+ properties:
+ append:
+ description: Append string to the header value.
+ type: string
+ name:
+ description: name of the header to modify.
+ type: string
+ remove:
+ description: If set, remove the header.
+ type: boolean
+ replace:
+ description: New value for a header. Header values support
+ the following formatters.
+ type: string
+ type: object
+ type: array
+ modifyResponseHeaders:
+ description: Apply the following modifications to the response
+ headers. The structure is documented below.
+ items:
+ properties:
+ append:
+ description: Append string to the header value.
+ type: string
+ name:
+ description: name of the header to modify.
+ type: string
+ remove:
+ description: If set, remove the header.
+ type: boolean
+ replace:
+ description: New value for a header. Header values support
+ the following formatters.
+ type: string
+ type: object
+ type: array
+ name:
+ description: Name of the virtual host. Provided by the client
+ when the virtual host is created.
+ type: string
+ route:
+ description: A Route resource. Routes are matched in-order. Be
+ careful when adding them to the end. For instance, having http
+ '/' match first makes all other routes unused. The structure
+ is documented below.
+ items:
+ properties:
+ grpcRoute:
+ description: GRPC route resource. The structure is documented
+ below.
+ items:
+ properties:
+ grpcMatch:
+ description: Checks "/" prefix by default. The structure
+ is documented below.
+ items:
+ properties:
+ fqmn:
+ description: If not set, all services/methods
+ are assumed. The structure is documented below.
+ items:
+ properties:
+ exact:
+ description: Match exactly.
+ type: string
+ prefix:
+ description: Match prefix.
+ type: string
+ regex:
+ description: Match regex.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ grpcRouteAction:
+ description: GRPC route action resource. The structure
+ is documented below.
+ items:
+ properties:
+ autoHostRewrite:
+ description: If set, will automatically rewrite
+ host.
+ type: boolean
+ backendGroupId:
+ description: Backend group to route requests.
+ type: string
+ hostRewrite:
+ description: Host rewrite specifier.
+ type: string
+ idleTimeout:
+ description: Specifies the idle timeout (time
+ without any data transfer for the active request)
+ for the route. It is useful for streaming
+ scenarios (i.e.
long-polling, server-sent
+ events) - one should set idle_timeout to something
+ meaningful and timeout to the maximum time
+ the stream is allowed to be alive. If not
+ specified, there is no per-route idle timeout.
+ type: string
+ maxTimeout:
+ description: Lower timeout may be specified
+ by the client (using grpc-timeout header).
+ If not set, default is 60 seconds.
+ type: string
+ type: object
+ type: array
+ grpcStatusResponseAction:
+ description: GRPC status response action resource.
+ The structure is documented below.
+ items:
+ properties:
+ status:
+ description: 'The status of the response. Supported
+ values are: ok, invalid_argument, not_found,
+ permission_denied, unauthenticated, unimplemented,
+ internal, unavailable.'
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ httpRoute:
+ description: HTTP route resource. The structure is documented
+ below.
+ items:
+ properties:
+ directResponseAction:
+ description: Direct response action resource. The
+ structure is documented below.
+ items:
+ properties:
+ body:
+ description: Response body text.
+ type: string
+ status:
+ description: 'The status of the response. Supported
+ values are: ok, invalid_argument, not_found,
+ permission_denied, unauthenticated, unimplemented,
+ internal, unavailable.'
+ type: number
+ type: object
+ type: array
+ httpMatch:
+ description: Checks "/" prefix by default. The structure
+ is documented below.
+ items:
+ properties:
+ httpMethod:
+ description: List of methods (strings).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ path:
+ description: If not set, '/' is assumed. The
+ structure is documented below.
+ items:
+ properties:
+ exact:
+ description: Match exactly.
+ type: string
+ prefix:
+ description: Match prefix.
+ type: string
+ regex:
+ description: Match regex.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ httpRouteAction:
+ description: HTTP route action resource. The structure
+ is documented below.
+ items:
+ properties:
+ autoHostRewrite:
+ description: If set, will automatically rewrite
+ host.
+ type: boolean
+ backendGroupId:
+ description: Backend group to route requests.
+ type: string
+ hostRewrite:
+ description: Host rewrite specifier.
+ type: string
+ idleTimeout:
+ description: Specifies the idle timeout (time
+ without any data transfer for the active request)
+ for the route. It is useful for streaming
+ scenarios (i.e. long-polling, server-sent
+ events) - one should set idle_timeout to something
+ meaningful and timeout to the maximum time
+ the stream is allowed to be alive. If not
+ specified, there is no per-route idle timeout.
+ type: string
+ prefixRewrite:
+ description: If not empty, matched path prefix
+ will be replaced by this value.
+ type: string
+ timeout:
+ description: Specifies the request timeout (overall
+ time request processing is allowed to take)
+ for the route. If not set, default is 60 seconds.
+ type: string
+ upgradeTypes:
+ description: List of upgrade types. Only specified
+ upgrade types will be allowed. For example,
+ "websocket".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type: object
+ type: array
+ redirectAction:
+ description: Redirect action resource. The structure
+ is documented below.
+ items:
+ properties:
+ removeQuery:
+ type: boolean
+ replaceHost:
+ description: Replaces hostname.
+ type: string
+ replacePath:
+ description: Replace path.
+ type: string
+ replacePort:
+ description: Replaces port.
+ type: number + replacePrefix: + description: 'Replace only matched prefix. Example: + match:{ prefix_match: "/some" } redirect: + { replace_prefix: "/other" } will redirect + "/something" to "/otherthing".' + type: string + replaceScheme: + description: Replaces scheme. If the original + scheme is http or https, will also remove + the 80 or 443 port, if present. + type: string + responseCode: + description: 'The HTTP status code to use in + the redirect response. Supported values are: + moved_permanently, found, see_other, temporary_redirect, + permanent_redirect.' + type: string + type: object + type: array + type: object + type: array + name: + description: name of the route. + type: string + routeOptions: + description: Route options for the virtual host. The structure + is documented below. + items: + properties: + rbac: + description: RBAC configuration. + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: name of the route. + type: string + value: + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: SWS profile ID. + type: string + type: object + type: array + type: object + type: array + routeOptions: + description: Route options for the virtual host. The structure + is documented below. + items: + properties: + rbac: + description: RBAC configuration. + items: + properties: + action: + type: string + principals: + items: + properties: + andPrincipals: + items: + properties: + any: + type: boolean + header: + items: + properties: + name: + description: name of the route. + type: string + value: + items: + properties: + exact: + description: Match exactly. + type: string + prefix: + description: Match prefix. + type: string + regex: + description: Match regex. + type: string + type: object + type: array + type: object + type: array + remoteIp: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + securityProfileId: + description: SWS profile ID. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
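+ # Illustrative sketch, not generator output: an RBAC block admitting only
+ # requests that carry a matching header. The action value is an assumption;
+ # header name and value are placeholders.
+ #   routeOptions:
+ #     - rbac:
+ #         - action: ALLOW
+ #           principals:
+ #             - andPrincipals:
+ #                 - header:
+ #                     - name: x-client-id
+ #                       value:
+ #                         - exact: trusted-client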
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/api.yandex-cloud.upjet.crossplane.io_gateways.yaml b/package/crds/api.yandex-cloud.upjet.crossplane.io_gateways.yaml new file mode 100644 index 0000000..16f7ac1 --- /dev/null +++ b/package/crds/api.yandex-cloud.upjet.crossplane.io_gateways.yaml @@ -0,0 +1,783 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: gateways.api.yandex-cloud.upjet.crossplane.io +spec: + group: api.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Gateway + listKind: GatewayList + plural: gateways + singular: gateway + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Gateway is the Schema for the Gateways API. Allows management + of a Yandex Cloud API Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GatewaySpec defines the desired state of Gateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + canary: + description: Canary release settings of gateway. 
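+ # Illustrative sketch, not generator output: routing 20% of requests to a
+ # canary release that overrides one specification variable.
+ #   canary:
+ #     - weight: 20
+ #       variables:
+ #         installation: canary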
+ items: + properties: + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + weight: + description: Percentage of requests, which will be processed + by canary release. + type: number + type: object + type: array + connectivity: + description: Gateway connectivity. If specified the gateway will + be attached to specified network. + items: + properties: + networkId: + description: Network the gateway will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + customDomains: + description: Set of custom domains to be attached to Yandex API + Gateway. + items: + properties: + certificateId: + type: string + domainId: + type: string + fqdn: + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud API Gateway. + type: string + executionTimeout: + description: Execution timeout in seconds for the Yandex Cloud + API Gateway. + type: string + folderId: + description: Folder ID for the Yandex Cloud API Gateway. If it + is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud API Gateway. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud API Gateway. + items: + properties: + disabled: + description: Is logging from API Gateway disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud API Gateway. + If it is not provided, the default provider folder is + used. + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: Yandex Cloud API Gateway name used to define API + Gateway. + type: string + spec: + description: OpenAPI specification for Yandex API Gateway. + type: string + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + canary: + description: Canary release settings of gateway. + items: + properties: + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + weight: + description: Percentage of requests, which will be processed + by canary release. + type: number + type: object + type: array + connectivity: + description: Gateway connectivity. If specified the gateway will + be attached to specified network. + items: + properties: + networkId: + description: Network the gateway will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + customDomains: + description: Set of custom domains to be attached to Yandex API + Gateway. + items: + properties: + certificateId: + type: string + domainId: + type: string + fqdn: + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud API Gateway. + type: string + executionTimeout: + description: Execution timeout in seconds for the Yandex Cloud + API Gateway. + type: string + folderId: + description: Folder ID for the Yandex Cloud API Gateway. If it + is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud API Gateway. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud API Gateway. + items: + properties: + disabled: + description: Is logging from API Gateway disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud API Gateway. + If it is not provided, the default provider folder is + used. + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: Yandex Cloud API Gateway name used to define API + Gateway. + type: string + spec: + description: OpenAPI specification for Yandex API Gateway. + type: string + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: GatewayStatus defines the observed state of Gateway. + properties: + atProvider: + properties: + canary: + description: Canary release settings of gateway. + items: + properties: + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + weight: + description: Percentage of requests, which will be processed + by canary release. + type: number + type: object + type: array + connectivity: + description: Gateway connectivity. If specified the gateway will + be attached to specified network. + items: + properties: + networkId: + description: Network the gateway will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the Yandex Cloud API Gateway. + type: string + customDomains: + description: Set of custom domains to be attached to Yandex API + Gateway. + items: + properties: + certificateId: + type: string + domainId: + type: string + fqdn: + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud API Gateway. + type: string + domain: + description: Default domain for the Yandex API Gateway. Generated + at creation time. + type: string + executionTimeout: + description: Execution timeout in seconds for the Yandex Cloud + API Gateway. + type: string + folderId: + description: Folder ID for the Yandex Cloud API Gateway. 
If it + is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud API Gateway. + type: object + x-kubernetes-map-type: granular + logGroupId: + description: Log entries are written to specified log group + type: string + logOptions: + description: Options for logging from Yandex Cloud API Gateway. + items: + properties: + disabled: + description: Is logging from API Gateway disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud API Gateway. + If it is not provided, the default provider folder is + used. + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: Yandex Cloud API Gateway name used to define API + Gateway. + type: string + spec: + description: OpenAPI specification for Yandex API Gateway. + type: string + status: + description: Status of the Yandex API Gateway. + type: string + userDomains: + description: (DEPRECATED, use custom_domains instead) Set of user + domains attached to Yandex API Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/audit.yandex-cloud.upjet.crossplane.io_trailstrails.yaml b/package/crds/audit.yandex-cloud.upjet.crossplane.io_trailstrails.yaml new file mode 100644 index 0000000..f8206bb --- /dev/null +++ b/package/crds/audit.yandex-cloud.upjet.crossplane.io_trailstrails.yaml @@ -0,0 +1,1306 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: trailstrails.audit.yandex-cloud.upjet.crossplane.io +spec: + group: audit.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: TrailsTrail + listKind: TrailsTrailList + plural: trailstrails + singular: trailstrail + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TrailsTrail is the Schema for the TrailsTrails API. Manages a + trail resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrailsTrailSpec defines the desired state of TrailsTrail + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataStreamDestination: + description: Structure describing destination data stream of the + trail. Mutually exclusive with logging_destination and storage_destination. + items: + properties: + databaseId: + description: ID of the YDB hosting the destination data + stream. + type: string + streamName: + description: Name of the YDS stream belonging to the specified + YDB. + type: string + type: object + type: array + description: + description: Description of the trail. + type: string + filter: + description: Structure describing event filtering process for + the trail. 
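+ # Illustrative sketch, not generator output: exactly one of
+ # dataStreamDestination, loggingDestination and storageDestination should
+ # be set; here events land in an object storage bucket. Names are
+ # placeholders.
+ #   storageDestination:
+ #     - bucketName: my-trail-bucket
+ #       objectPrefix: audit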
+ items: + properties: + eventFilters: + description: Structure describing filtering process for + the service-specific data plane events. + items: + properties: + categories: + description: blocks. With the introduction of included_events/excluded_events + you can configure filtering per each event type. + items: + properties: + plane: + description: 'Type of the event by its relation + to the cloud resource model. Possible values: + CONTROL_PLANE/DATA_PLANE.' + type: string + type: + description: 'Type of the event by its operation + effect on the resource. Possible values: READ/WRITE.' + type: string + type: object + type: array + pathFilter: + description: with the appropriate resource_scope blocks. + You have to account that resource_scope does not + support specifying relations between resources, + so your configuration will simplify to only the + actual resources, that will be monitored. + items: + properties: + anyFilter: + description: Structure describing that events + will be gathered from all cloud resources + that belong to the parent resource. Mutually + exclusive with some_filter. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + someFilter: + description: Structure describing that events + will be gathered from some of the cloud resources + that belong to the parent resource. Mutually + exclusive with any_filter. + items: + properties: + anyFilters: + description: List of child resources from + which events will be gathered. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the + parent resource. + type: string + type: object + type: array + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + type: object + type: array + service: + description: ID of the service which events will be + gathered. + type: string + type: object + type: array + pathFilter: + description: block with the filtering_policy.management_events_filter. + New API states management events filtration in a more + clear way. The resources, that were specified, must migrate + into the filtering_policy.management_events_filter.resource_scope + items: + properties: + anyFilter: + description: Structure describing that events will + be gathered from all cloud resources that belong + to the parent resource. Mutually exclusive with + some_filter. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + someFilter: + description: Structure describing that events will + be gathered from some of the cloud resources that + belong to the parent resource. Mutually exclusive + with any_filter. + items: + properties: + anyFilters: + description: List of child resources from which + events will be gathered. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. 
+ type: string + type: object + type: array + type: object + type: array + type: object + type: array + filteringPolicy: + description: Structure describing event filtering process for + the trail. Mutually exclusive with filter. At least one of the + management_events_filter or data_events_filter fields will be + filled. + items: + properties: + dataEventsFilter: + description: Structure describing filtering process for + the service-specific data events. + items: + properties: + excludedEvents: + description: A list of events that won't be gathered + by the trail from this service. New events will + be automatically gathered when this option is specified. + Mutually exclusive with included_events. + items: + type: string + type: array + includedEvents: + description: A list of events that will be gathered + by the trail from this service. New events won't + be gathered by default when this option is specified. + Mutually exclusive with excluded_events. + items: + type: string + type: array + resourceScope: + description: Structure describing that events will + be gathered from the specified resource. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + service: + description: ID of the service which events will be + gathered. + type: string + type: object + type: array + managementEventsFilter: + description: Structure describing filtering process for + management events. + items: + properties: + resourceScope: + description: Structure describing that events will + be gathered from the specified resource. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + folderId: + description: ID of the folder to which the trail belongs. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
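Rather than hard-coding folderId, the folderIdRef and folderIdSelector fields above let a trail resolve its folder from a Folder object managed in the same cluster. A minimal sketch of a reference (the Folder object name is hypothetical):

apiVersion: audit.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: TrailsTrail
metadata:
  name: example-trail
spec:
  forProvider:
    folderIdRef:
      name: my-folder            # hypothetical Folder.resourcemanager object in this cluster
      policy:
        resolution: Required     # reconcile fails if the reference cannot be resolved
        resolve: IfNotPresent    # resolve only while folderId is unset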
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels defined by the user. + type: object + x-kubernetes-map-type: granular + loggingDestination: + description: Structure describing destination log group of the + trail. Mutually exclusive with storage_destination and data_stream_destination. + items: + properties: + logGroupId: + description: ID of the destination Cloud Logging Group. + type: string + type: object + type: array + name: + description: Name of the trail. + type: string + serviceAccountId: + description: ID of the IAM service account that is used by the + trail. + type: string + storageDestination: + description: Structure describing destination bucket of the trail. + Mutually exclusive with logging_destination and data_stream_destination. + items: + properties: + bucketName: + description: Name of the destination bucket. + type: string + objectPrefix: + description: Additional prefix of the uploaded objects. + If not specified, objects will be uploaded with prefix + equal to trail_id. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataStreamDestination: + description: Structure describing destination data stream of the + trail. Mutually exclusive with logging_destination and storage_destination. + items: + properties: + databaseId: + description: ID of the YDB hosting the destination data + stream. + type: string + streamName: + description: Name of the YDS stream belonging to the specified + YDB. + type: string + type: object + type: array + description: + description: Description of the trail. + type: string + filter: + description: Structure describing event filtering process for + the trail. + items: + properties: + eventFilters: + description: Structure describing filtering process for + the service-specific data plane events. + items: + properties: + categories: + description: blocks. With the introduction of included_events/excluded_events + you can configure filtering per each event type. + items: + properties: + plane: + description: 'Type of the event by its relation + to the cloud resource model. Possible values: + CONTROL_PLANE/DATA_PLANE.' 
+ type: string + type: + description: 'Type of the event by its operation + effect on the resource. Possible values: READ/WRITE.' + type: string + type: object + type: array + pathFilter: + description: with the appropriate resource_scope blocks. + You have to account that resource_scope does not + support specifying relations between resources, + so your configuration will simplify to only the + actual resources, that will be monitored. + items: + properties: + anyFilter: + description: Structure describing that events + will be gathered from all cloud resources + that belong to the parent resource. Mutually + exclusive with some_filter. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + someFilter: + description: Structure describing that events + will be gathered from some of the cloud resources + that belong to the parent resource. Mutually + exclusive with any_filter. + items: + properties: + anyFilters: + description: List of child resources from + which events will be gathered. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the + parent resource. + type: string + type: object + type: array + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + type: object + type: array + service: + description: ID of the service which events will be + gathered. + type: string + type: object + type: array + pathFilter: + description: block with the filtering_policy.management_events_filter. + New API states management events filtration in a more + clear way. The resources, that were specified, must migrate + into the filtering_policy.management_events_filter.resource_scope + items: + properties: + anyFilter: + description: Structure describing that events will + be gathered from all cloud resources that belong + to the parent resource. Mutually exclusive with + some_filter. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + someFilter: + description: Structure describing that events will + be gathered from some of the cloud resources that + belong to the parent resource. Mutually exclusive + with any_filter. + items: + properties: + anyFilters: + description: List of child resources from which + events will be gathered. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + filteringPolicy: + description: Structure describing event filtering process for + the trail. Mutually exclusive with filter. At least one of the + management_events_filter or data_events_filter fields will be + filled. + items: + properties: + dataEventsFilter: + description: Structure describing filtering process for + the service-specific data events. 
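As the schema notes, includedEvents and excludedEvents are mutually exclusive per service. A hypothetical filteringPolicy fragment — the service ID, event name, and resource type are illustrative placeholders, not values taken from this file:

filteringPolicy:
  - dataEventsFilter:
      - service: storage                                # hypothetical service ID
        includedEvents:                                 # gather only the listed events;
          - yandex.cloud.audit.storage.ObjectUpdate     # new event types are NOT auto-added
    managementEventsFilter:
      - resourceScope:
          - resourceId: b1gexample                      # hypothetical folder ID
            resourceType: resource-manager.folder       # hypothetical resource type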
+ items: + properties: + excludedEvents: + description: A list of events that won't be gathered + by the trail from this service. New events will + be automatically gathered when this option is specified. + Mutually exclusive with included_events. + items: + type: string + type: array + includedEvents: + description: A list of events that will be gathered + by the trail from this service. New events won't + be gathered by default when this option is specified. + Mutually exclusive with excluded_events. + items: + type: string + type: array + resourceScope: + description: Structure describing that events will + be gathered from the specified resource. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + service: + description: ID of the service which events will be + gathered. + type: string + type: object + type: array + managementEventsFilter: + description: Structure describing filtering process for + management events. + items: + properties: + resourceScope: + description: Structure describing that events will + be gathered from the specified resource. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + folderId: + description: ID of the folder to which the trail belongs. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels defined by the user. + type: object + x-kubernetes-map-type: granular + loggingDestination: + description: Structure describing destination log group of the + trail. Mutually exclusive with storage_destination and data_stream_destination. + items: + properties: + logGroupId: + description: ID of the destination Cloud Logging Group. + type: string + type: object + type: array + name: + description: Name of the trail. + type: string + serviceAccountId: + description: ID of the IAM service account that is used by the + trail. + type: string + storageDestination: + description: Structure describing destination bucket of the trail. + Mutually exclusive with logging_destination and data_stream_destination. + items: + properties: + bucketName: + description: Name of the destination bucket. + type: string + objectPrefix: + description: Additional prefix of the uploaded objects. + If not specified, objects will be uploaded with prefix + equal to trail_id. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
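The managementPolicies array above composes with deletionPolicy; for instance, an existing trail can be brought under observation without letting Crossplane mutate or delete it. A minimal sketch, assuming a pre-existing trail whose ID is hypothetical:

apiVersion: audit.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: TrailsTrail
metadata:
  name: imported-trail
  annotations:
    crossplane.io/external-name: trail-id-example   # hypothetical ID of the existing trail
spec:
  managementPolicies: ["Observe"]   # observe-only: no create/update/delete
  providerConfigRef:
    name: default                   # matches the schema default
  forProvider: {}                   # the validation rules below only require fields for Create/Update

Note that the x-kubernetes-validations further down only demand forProvider.name and forProvider.serviceAccountId when '*', Create, or Update is among the management policies, so an observe-only spec can stay empty.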
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.serviceAccountId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceAccountId) + || (has(self.initProvider) && has(self.initProvider.serviceAccountId))' + status: + description: TrailsTrailStatus defines the observed state of TrailsTrail. + properties: + atProvider: + properties: + dataStreamDestination: + description: Structure describing destination data stream of the + trail. Mutually exclusive with logging_destination and storage_destination. + items: + properties: + databaseId: + description: ID of the YDB hosting the destination data + stream. + type: string + streamName: + description: Name of the YDS stream belonging to the specified + YDB. + type: string + type: object + type: array + description: + description: Description of the trail. + type: string + filter: + description: Structure describing event filtering process for + the trail. + items: + properties: + eventFilters: + description: Structure describing filtering process for + the service-specific data plane events. + items: + properties: + categories: + description: blocks. With the introduction of included_events/excluded_events + you can configure filtering per each event type. + items: + properties: + plane: + description: 'Type of the event by its relation + to the cloud resource model. Possible values: + CONTROL_PLANE/DATA_PLANE.' + type: string + type: + description: 'Type of the event by its operation + effect on the resource. Possible values: READ/WRITE.' + type: string + type: object + type: array + pathFilter: + description: with the appropriate resource_scope blocks. + You have to account that resource_scope does not + support specifying relations between resources, + so your configuration will simplify to only the + actual resources, that will be monitored. + items: + properties: + anyFilter: + description: Structure describing that events + will be gathered from all cloud resources + that belong to the parent resource. Mutually + exclusive with some_filter. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + someFilter: + description: Structure describing that events + will be gathered from some of the cloud resources + that belong to the parent resource. Mutually + exclusive with any_filter. + items: + properties: + anyFilters: + description: List of child resources from + which events will be gathered. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the + parent resource. + type: string + type: object + type: array + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. 
+ type: string + type: object + type: array + type: object + type: array + service: + description: ID of the service which events will be + gathered. + type: string + type: object + type: array + pathFilter: + description: block with the filtering_policy.management_events_filter. + New API states management events filtration in a more + clear way. The resources, that were specified, must migrate + into the filtering_policy.management_events_filter.resource_scope + items: + properties: + anyFilter: + description: Structure describing that events will + be gathered from all cloud resources that belong + to the parent resource. Mutually exclusive with + some_filter. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + someFilter: + description: Structure describing that events will + be gathered from some of the cloud resources that + belong to the parent resource. Mutually exclusive + with any_filter. + items: + properties: + anyFilters: + description: List of child resources from which + events will be gathered. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent + resource. + type: string + type: object + type: array + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + filteringPolicy: + description: Structure describing event filtering process for + the trail. Mutually exclusive with filter. At least one of the + management_events_filter or data_events_filter fields will be + filled. + items: + properties: + dataEventsFilter: + description: Structure describing filtering process for + the service-specific data events. + items: + properties: + excludedEvents: + description: A list of events that won't be gathered + by the trail from this service. New events will + be automatically gathered when this option is specified. + Mutually exclusive with included_events. + items: + type: string + type: array + includedEvents: + description: A list of events that will be gathered + by the trail from this service. New events won't + be gathered by default when this option is specified. + Mutually exclusive with excluded_events. + items: + type: string + type: array + resourceScope: + description: Structure describing that events will + be gathered from the specified resource. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. + type: string + type: object + type: array + service: + description: ID of the service which events will be + gathered. + type: string + type: object + type: array + managementEventsFilter: + description: Structure describing filtering process for + management events. + items: + properties: + resourceScope: + description: Structure describing that events will + be gathered from the specified resource. + items: + properties: + resourceId: + description: ID of the parent resource. + type: string + resourceType: + description: Resource type of the parent resource. 
+ type: string + type: object + type: array + type: object + type: array + type: object + type: array + folderId: + description: ID of the folder to which the trail belongs. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: Labels defined by the user. + type: object + x-kubernetes-map-type: granular + loggingDestination: + description: Structure describing destination log group of the + trail. Mutually exclusive with storage_destination and data_stream_destination. + items: + properties: + logGroupId: + description: ID of the destination Cloud Logging Group. + type: string + type: object + type: array + name: + description: Name of the trail. + type: string + serviceAccountId: + description: ID of the IAM service account that is used by the + trail. + type: string + status: + description: Status of this trail. + type: string + storageDestination: + description: Structure describing destination bucket of the trail. + Mutually exclusive with logging_destination and data_stream_destination. + items: + properties: + bucketName: + description: Name of the destination bucket. + type: string + objectPrefix: + description: Additional prefix of the uploaded objects. + If not specified, objects will be uploaded with prefix + equal to trail_id. + type: string + type: object + type: array + trailId: + description: ID of the trail resource. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
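Putting the schema together, a complete manifest that satisfies the validation rules (name and serviceAccountId are required when Create/Update is allowed) might look like the following sketch; all IDs and names are hypothetical:

apiVersion: audit.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: TrailsTrail
metadata:
  name: example-trail
spec:
  forProvider:
    name: example-trail
    folderId: b1gexample                    # hypothetical folder ID
    serviceAccountId: ajeexample            # hypothetical service account ID
    description: Audit trail for the example folder
    storageDestination:                     # exactly one destination block may be set
      - bucketName: example-audit-bucket    # hypothetical bucket name
        objectPrefix: audit                 # optional; defaults to trail_id
    filteringPolicy:
      - managementEventsFilter:
          - resourceScope:
              - resourceId: b1gexample
                resourceType: resource-manager.folder   # hypothetical resource type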
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/backup.yandex-cloud.upjet.crossplane.io_policies.yaml b/package/crds/backup.yandex-cloud.upjet.crossplane.io_policies.yaml
new file mode 100644
index 0000000..e1e8064
--- /dev/null
+++ b/package/crds/backup.yandex-cloud.upjet.crossplane.io_policies.yaml
@@ -0,0 +1,1293 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: policies.backup.yandex-cloud.upjet.crossplane.io
+spec:
+  group: backup.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: Policy
+    listKind: PolicyList
+    plural: policies
+    singular: policy
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: Policy is the Schema for the Policies API. Allows management
+          of Yandex.Cloud Backup Policy.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: PolicySpec defines the desired state of Policy
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  archiveName:
+                    description: — The name of generated archives (by default
+                      [Plan ID]-[Unique ID]a).
+                    type: string
+                  cbt:
+                    description: '— Configuration of Changed Block Tracking. Available
+                      values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".'
+                    type: string
+                  compression:
+                    description: '— Archive compression level. Affects CPU. Available
+                      values: "NORMAL", "HIGH", "MAX", "OFF".'
+                    type: string
+                  fastBackupEnabled:
+                    description: — Enable flag
+                    type: boolean
+                  folderId:
+                    description: — ID of the folder to which the policy belongs.
+                    type: string
+                  folderIdRef:
+                    description: Reference to a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  folderIdSelector:
+                    description: Selector for a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching labels
+                          is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    type: object
+                  format:
+                    description: '— Format of the backup. It''s strongly recommended
+                      to leave this option empty or "AUTO". Available values: "AUTO",
+                      "VERSION_11", "VERSION_12".'
+                    type: string
+                  multiVolumeSnapshottingEnabled:
+                    description: — If true, snapshots of multiple volumes will be
+                      taken simultaneously.
+                    type: boolean
+                  name:
+                    description: — Name of the policy
+                    type: string
+                  performanceWindowEnabled:
+                    description: — Time windows for performance limitations of backup.
+                    type: boolean
+                  preserveFileSecuritySettings:
+                    description: — Preserves file security settings. It's better to
+                      set this option to true.
+                    type: boolean
+                  quiesceSnapshottingEnabled:
+                    description: — If true, a quiesced snapshot of the virtual machine
+                      will be taken.
+                    type: boolean
+                  reattempts:
+                    description: '— Number of reattempts that should be performed
+                      while trying to make a backup at the host. This attribute consists
+                      of the following parameters:'
+                    items:
+                      properties:
+                        enabled:
+                          description: — Enable flag
+                          type: boolean
+                        interval:
+                          description: — Retry interval. See interval_type for available
+                            values.
+                          type: string
+                        maxAttempts:
+                          description: — Maximum number of attempts before throwing
+                            an error
+                          type: number
+                      type: object
+                    type: array
+                  retention:
+                    description: '— Retention policy for backups. Allows you to set
+                      up the backup lifecycle. This attribute consists of the following
+                      parameters:'
+                    items:
+                      properties:
+                        afterBackup:
+                          description: — Defines whether the retention rule applies
+                            after creating the backup or before.
+                          type: boolean
+                        rules:
+                          description: — A list of retention rules.
+                          items:
+                            properties:
+                              maxAge:
+                                description: (Conflicts with max_count) — Deletes
+                                  backups that are older than max_age. Exactly one
+                                  of max_count or max_age should be set.
+                                type: string
+                              maxCount:
+                                description: (Conflicts with max_age) — Deletes backups
+                                  if their count exceeds max_count. Exactly one of
+                                  max_count or max_age should be set.
+                                type: number
+                              repeatPeriod:
+                                description: — Repeat period, in days.
+                                items:
+                                  type: string
+                                type: array
+                            type: object
+                          type: array
+                      type: object
+                    type: array
+                  scheduling:
+                    description: — Schedule settings for creating backups on the host.
+                    items:
+                      properties:
+                        backupSets:
+                          description: A list of schedules with backup sets that compose
+                            the whole scheme.
+                          items:
+                            properties:
+                              executeByInterval:
+                                description: '— Perform backup by interval, since
+                                  last backup of the host. Maximum value is: 9999
+                                  days. See interval_type for available values. Exactly
+                                  one of the options should be set: execute_by_interval
+                                  or execute_by_time.'
+                                type: number
+                              executeByTime:
+                                description: '— Perform backup periodically at specific
+                                  time. Exactly one of the options should be set:
+                                  execute_by_interval or execute_by_time.'
+                                items:
+                                  properties:
+                                    includeLastDayOfMonth:
+                                      description: — If true, the schedule will be
+                                        applied on the last day of the month. See
+                                        day_type for available values.
+                                      type: boolean
+                                    monthdays:
+                                      description: — List of days when the schedule
+                                        applies. Used in "MONTHLY" type.
+                                      items:
+                                        type: number
+                                      type: array
+                                    months:
+                                      description: — List of months when the schedule
+                                        applies.
+                                      items:
+                                        type: number
+                                      type: array
+                                    repeatAt:
+                                      description: — Time of day (24-hours format,
+                                        HH:MM) when the schedule applies.
+                                      items:
+                                        type: string
+                                      type: array
+                                    repeatEvery:
+                                      description: — Frequency of backup repetition.
+                                        See interval_type for available values.
+                                      type: string
+                                    type:
+                                      description: '— Type of the scheduling. Available
+                                        values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".'
+                                      type: string
+                                    weekdays:
+                                      description: — List of weekdays when the backup
+                                        will be applied. Used in "WEEKLY" type.
+                                      items:
+                                        type: string
+                                      type: array
+                                  type: object
+                                type: array
+                              type:
+                                description: '— Type of the scheduling. Available
+                                  values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".'
+                                type: string
+                            type: object
+                          type: array
+                        enabled:
+                          description: — Enable flag
+                          type: boolean
+                        executeByInterval:
+                          description: '— Perform backup by interval, since last backup
+                            of the host. Maximum value is: 9999 days. See interval_type
+                            for available values. Exactly one of the options should
+                            be set: execute_by_interval or execute_by_time.'
+                          type: number
+                        executeByTime:
+                          description: '— Perform backup periodically at specific
+                            time. Exactly one of the options should be set: execute_by_interval
+                            or execute_by_time.'
+                          items:
+                            properties:
+                              includeLastDayOfMonth:
+                                description: — If true, the schedule will be applied
+                                  on the last day of the month. See day_type for
+                                  available values.
+                                type: boolean
+                              monthdays:
+                                description: — List of days when the schedule applies.
+                                  Used in "MONTHLY" type.
+                                items:
+                                  type: number
+                                type: array
+                              months:
+                                description: — List of months when the schedule applies.
+                                items:
+                                  type: number
+                                type: array
+                              repeatAt:
+                                description: — Time of day (24-hours format, HH:MM)
+                                  when the schedule applies.
+                                items:
+                                  type: string
+                                type: array
+                              repeatEvery:
+                                description: — Frequency of backup repetition. See
+                                  interval_type for available values.
+                                type: string
+                              type:
+                                description: '— Type of the scheduling. Available
+                                  values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".'
+                                type: string
+                              weekdays:
+                                description: — List of weekdays when the backup will
+                                  be applied. Used in "WEEKLY" type.
+                                items:
+                                  type: string
+                                type: array
+                            type: object
+                          type: array
+                        maxParallelBackups:
+                          description: — Maximum number of backup processes allowed
+                            to run in parallel. 0 for unlimited.
+                          type: number
+                        randomMaxDelay:
+                          description: — Configuration of the random delay between
+                            the execution of parallel tasks. See interval_type for
+                            available values.
+                          type: string
+                        scheme:
+                          description: '— Scheme of the backups. Available values
+                            are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL",
+                            "WEEKLY_INCREMENTAL".'
+                          type: string
+                        weeklyBackupDay:
+                          description: — A day of week to start weekly backups. See
+                            day_type for available values.
+                          type: string
+                      type: object
+                    type: array
+                  silentModeEnabled:
+                    description: — If true, user interaction will be avoided when
+                      possible.
+                    type: boolean
+                  splittingBytes:
+                    description: — Determines the size used to split backups. It's
+                      better to leave this option unchanged.
+                    type: string
+                  vmSnapshotReattempts:
+                    description: '(Required) — Number of reattempts that should be
+                      performed while trying to make a snapshot. This attribute consists
+                      of the following parameters:'
+                    items:
+                      properties:
+                        enabled:
+                          description: — Enable flag
+                          type: boolean
+                        interval:
+                          description: — Retry interval. See interval_type for available
+                            values.
+                          type: string
+                        maxAttempts:
+                          description: — Maximum number of attempts before throwing
+                            an error
+                          type: number
+                      type: object
+                    type: array
+                  vssProvider:
+                    description: '— Settings for the volume shadow copy service. Available
+                      values are: "NATIVE", "TARGET_SYSTEM_DEFINED".'
+                    type: string
+                type: object
+              initProvider:
+                description: |-
+                  THIS IS A BETA FIELD. It will be honored
+                  unless the Management Policies feature flag is disabled.
+                  InitProvider holds the same fields as ForProvider, with the exception
+                  of Identifier and other resource reference fields. The fields that are
+                  in InitProvider are merged into ForProvider when the resource is created.
+                  The same fields are also added to the terraform ignore_changes hook, to
+                  avoid updating them after creation. This is useful for fields that are
+                  required on creation, but we do not desire to update them after creation,
+                  for example because an external controller is managing them, like an
+                  autoscaler.
+                properties:
+                  archiveName:
+                    description: — The name of generated archives (by default
+                      [Plan ID]-[Unique ID]a).
+                    type: string
+                  cbt:
+                    description: '— Configuration of Changed Block Tracking. Available
+                      values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".'
+                    type: string
+                  compression:
+                    description: '— Archive compression level. Affects CPU. Available
+                      values: "NORMAL", "HIGH", "MAX", "OFF".'
+                    type: string
+                  fastBackupEnabled:
+                    description: — Enable flag
+                    type: boolean
+                  folderId:
+                    description: — ID of the folder to which the policy belongs.
+                    type: string
+                  folderIdRef:
+                    description: Reference to a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + format: + description: '— Format of the backup. It''s strongly recommend + to leave this option empty or "AUTO". Available values: "AUTO", + "VERSION_11", "VERSION_12".' + type: string + multiVolumeSnapshottingEnabled: + description: — If true, snapshots of multiple volumes will be + taken simultaneously. + type: boolean + name: + description: — Name of the policy + type: string + performanceWindowEnabled: + description: — Time windows for performance limitations of backup. + type: boolean + preserveFileSecuritySettings: + description: — Preserves file security settings. It's better to + set this option to true. + type: boolean + quiesceSnapshottingEnabled: + description: — If true, a quiesced snapshot of the virtual machine + will be taken. + type: boolean + reattempts: + description: '— Amount of reattempts that should be performed + while trying to make backup at the host. This attribute consists + of the following parameters:' + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + retention: + description: '— Retention policy for backups. Allows to setup + backups lifecycle. This attribute consists of the following + parameters:' + items: + properties: + afterBackup: + description: — Defines whether retention rule applies after + creating backup or before. + type: boolean + rules: + description: — seconds + items: + properties: + maxAge: + description: (Conflicts with max_count) — Deletes + backups that older than max_age. Exactly one of + max_count or max_age should be set. 
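The maxAge/maxCount exclusivity reads more naturally in a concrete fragment. A sketch with assumed value formats (the duration syntax for maxAge is not specified in this file, so "365d" is a guess):

retention:
  - afterBackup: false      # apply the rule before creating the next backup
    rules:
      - maxAge: 365d        # hypothetical duration format; conflicts with maxCount
  - afterBackup: true
    rules:
      - maxCount: 10        # keep at most 10 backups; conflicts with maxAge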
+ type: string + maxCount: + description: (Conflicts with max_age) — Deletes backups + if it's count exceeds max_count. Exactly one of + max_count or max_age should be set. + type: number + repeatPeriod: + description: — days + items: + type: string + type: array + type: object + type: array + type: object + type: array + scheduling: + description: — Schedule settings for creating backups on the host. + items: + properties: + backupSets: + description: A list of schedules with backup sets that compose + the whole scheme. + items: + properties: + executeByInterval: + description: '— Perform backup by interval, since + last backup of the host. Maximum value is: 9999 + days. See interval_type for available values. Exactly + on of options should be set: execute_by_interval + or execute_by_time.' + type: number + executeByTime: + description: '— Perform backup periodically at specific + time. Exactly on of options should be set: execute_by_interval + or execute_by_time.' + items: + properties: + includeLastDayOfMonth: + description: — If true, schedule will be applied + on the last day of month. See day_type for + available values. + type: boolean + monthdays: + description: — List of days when schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + months: + description: — seconds + items: + type: number + type: array + repeatAt: + description: hours format), when the schedule + applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. + See interval_type for available values. + type: string + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup + will be applied. Used in "WEEKLY" type. + items: + type: string + type: array + type: object + type: array + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + type: object + type: array + enabled: + description: — Enable flag + type: boolean + executeByInterval: + description: '— Perform backup by interval, since last backup + of the host. Maximum value is: 9999 days. See interval_type + for available values. Exactly on of options should be + set: execute_by_interval or execute_by_time.' + type: number + executeByTime: + description: '— Perform backup periodically at specific + time. Exactly on of options should be set: execute_by_interval + or execute_by_time.' + items: + properties: + includeLastDayOfMonth: + description: — If true, schedule will be applied on + the last day of month. See day_type for available + values. + type: boolean + monthdays: + description: — List of days when schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + months: + description: — seconds + items: + type: number + type: array + repeatAt: + description: hours format), when the schedule applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. See + interval_type for available values. + type: string + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup will + be applied. Used in "WEEKLY" type. + items: + type: string + type: array + type: object + type: array + maxParallelBackups: + description: — Maximum number of backup processes allowed + to run in parallel. 
0 for unlimited. + type: number + randomMaxDelay: + description: — Configuration of the random delay between + the execution of parallel tasks. See interval_type for + available values. + type: string + scheme: + description: '— Scheme of the backups. Available values + are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", + ''WEEKLY_INCREMENTAL".' + type: string + weeklyBackupDay: + description: — A day of week to start weekly backups. See + day_type for available values. + type: string + type: object + type: array + silentModeEnabled: + description: — if true, a user interaction will be avoided when + possible. + type: boolean + splittingBytes: + description: — determines the size to split backups. It's better + to leave this option unchanged. + type: string + vmSnapshotReattempts: + description: '(Requied) — Amount of reattempts that should be + performed while trying to make snapshot. This attribute consists + of the following parameters:' + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + vssProvider: + description: '— Settings for the volume shadow copy service. Available + values are: "NATIVE", "TARGET_SYSTEM_DEFINED"' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
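The x-kubernetes-validations that follow require name, reattempts, retention, scheduling, and vmSnapshotReattempts whenever Create or Update is allowed. A minimal sketch that satisfies them; all values are illustrative and the interval/duration formats are assumptions:

apiVersion: backup.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Policy
metadata:
  name: example-policy
spec:
  forProvider:
    name: example-policy
    folderId: b1gexample          # hypothetical folder ID
    reattempts:
      - enabled: true
        interval: 5m              # hypothetical interval format
        maxAttempts: 5
    vmSnapshotReattempts:
      - enabled: true
        interval: 5m
        maxAttempts: 5
    retention:
      - afterBackup: false
        rules:
          - maxCount: 10          # exactly one of maxCount or maxAge
    scheduling:
      - enabled: true
        scheme: ALWAYS_INCREMENTAL
        executeByTime:            # exactly one of executeByTime or executeByInterval
          - type: WEEKLY
            weekdays: ["MONDAY"]
            repeatAt: ["03:00"]
        weeklyBackupDay: MONDAY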
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.reattempts is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.reattempts) + || (has(self.initProvider) && has(self.initProvider.reattempts))' + - message: spec.forProvider.retention is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retention) + || (has(self.initProvider) && has(self.initProvider.retention))' + - message: spec.forProvider.scheduling is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scheduling) + || (has(self.initProvider) && has(self.initProvider.scheduling))' + - message: spec.forProvider.vmSnapshotReattempts is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vmSnapshotReattempts) + || (has(self.initProvider) && has(self.initProvider.vmSnapshotReattempts))' + status: + description: PolicyStatus defines the observed state of Policy. + properties: + atProvider: + properties: + archiveName: + description: '— The name of generated archives (for example, + [Plan ID]-[Unique ID]a).' + type: string + cbt: + description: '— Configuration of Changed Block Tracking. Available + values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".' + type: string + compression: + description: '— Archive compression level. Affects CPU. Available + values: "NORMAL", "HIGH", "MAX", "OFF".' + type: string + createdAt: + type: string + enabled: + description: — Enable flag + type: boolean + fastBackupEnabled: + description: — Enable flag + type: boolean + folderId: + description: — ID of the folder that the policy belongs to. + type: string + format: + description: '— Format of the backup. It''s strongly recommended + to leave this option empty or "AUTO". Available values: "AUTO", + "VERSION_11", "VERSION_12".' + type: string + id: + description: — ID of the policy. + type: string + multiVolumeSnapshottingEnabled: + description: — If true, snapshots of multiple volumes will be + taken simultaneously. + type: boolean + name: + description: — Name of the policy + type: string + performanceWindowEnabled: + description: — Enables time windows for performance limitations of backup. + type: boolean + preserveFileSecuritySettings: + description: — Preserves file security settings. It's better to + set this option to true. + type: boolean + quiesceSnapshottingEnabled: + description: — If true, a quiesced snapshot of the virtual machine + will be taken. + type: boolean + reattempts: + description: '— Amount of reattempts that should be performed + while trying to make a backup at the host. This attribute consists + of the following parameters:' + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval.
See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + retention: + description: '— Retention policy for backups. Allows you to set up + the backup lifecycle. This attribute consists of the following + parameters:' + items: + properties: + afterBackup: + description: — Defines whether the retention rule applies after + creating a backup or before. + type: boolean + rules: + description: — List of retention rules. + items: + properties: + maxAge: + description: (Conflicts with max_count) — Deletes + backups that are older than max_age. Exactly one of + max_count or max_age should be set. + type: string + maxCount: + description: (Conflicts with max_age) — Deletes backups + if their count exceeds max_count. Exactly one of + max_count or max_age should be set. + type: number + repeatPeriod: + description: — List of repeat periods. See interval_type + for available values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + scheduling: + description: — Schedule settings for creating backups on the host. + items: + properties: + backupSets: + description: A list of schedules with backup sets that compose + the whole scheme. + items: + properties: + executeByInterval: + description: '— Perform backup by interval, since + the last backup of the host. Maximum value is: 9999 + days. See interval_type for available values. Exactly + one of the options should be set: execute_by_interval + or execute_by_time.' + type: number + executeByTime: + description: '— Perform backup periodically at a specific + time. Exactly one of the options should be set: execute_by_interval + or execute_by_time.' + items: + properties: + includeLastDayOfMonth: + description: — If true, the schedule will be applied + on the last day of the month. See day_type for + available values. + type: boolean + monthdays: + description: — List of days when the schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + months: + description: — List of months when the schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + repeatAt: + description: — Time in the HH:MM (24-hours) format, when the schedule + applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. + See interval_type for available values. + type: string + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup + will be applied. Used in "WEEKLY" type. + items: + type: string + type: array + type: object + type: array + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + type: object + type: array + enabled: + description: — Enable flag + type: boolean + executeByInterval: + description: '— Perform backup by interval, since the last backup + of the host. Maximum value is: 9999 days. See interval_type + for available values. Exactly one of the options should be + set: execute_by_interval or execute_by_time.' + type: number + executeByTime: + description: '— Perform backup periodically at a specific + time. Exactly one of the options should be set: execute_by_interval + or execute_by_time.' + items: + properties: + includeLastDayOfMonth: + description: — If true, the schedule will be applied on + the last day of the month. See day_type for available + values. + type: boolean + monthdays: + description: — List of days when the schedule applies. + Used in "MONTHLY" type.
+ items: + type: number + type: array + months: + description: — List of months when the schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + repeatAt: + description: — Time in the HH:MM (24-hours) format, when the schedule applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. See + interval_type for available values. + type: string + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup will + be applied. Used in "WEEKLY" type. + items: + type: string + type: array + type: object + type: array + maxParallelBackups: + description: — Maximum number of backup processes allowed + to run in parallel. 0 for unlimited. + type: number + randomMaxDelay: + description: — Configuration of the random delay between + the execution of parallel tasks. See interval_type for + available values. + type: string + scheme: + description: '— Scheme of the backups. Available values + are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", + "WEEKLY_INCREMENTAL".' + type: string + weeklyBackupDay: + description: — A day of the week to start weekly backups. See + day_type for available values. + type: string + type: object + type: array + silentModeEnabled: + description: — If true, user interaction will be avoided when + possible. + type: boolean + splittingBytes: + description: — Determines the size at which backups are split. It's better + to leave this option unchanged. + type: string + updatedAt: + type: string + vmSnapshotReattempts: + description: '(Required) — Amount of reattempts that should be + performed while trying to make a snapshot. This attribute consists + of the following parameters:' + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + vssProvider: + description: '— Settings for the volume shadow copy service. Available + values are: "NATIVE", "TARGET_SYSTEM_DEFINED".' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time.
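# A minimal, hypothetical Policy manifest sketched from the schema above. The
# CEL validations listed earlier require name, reattempts, retention,
# scheduling, and vmSnapshotReattempts whenever Create, Update, or '*' is in
# managementPolicies; all field values below are illustrative only.
apiVersion: backup.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Policy
metadata:
  name: example-backup-policy
spec:
  forProvider:
    name: example-backup-policy
    reattempts:
      - enabled: true
        interval: 5m        # illustrative interval string; see interval_type
        maxAttempts: 5
    retention:
      - afterBackup: false
        rules:
          - maxCount: 10    # exactly one of maxCount or maxAge per rule
    scheduling:
      - enabled: true
        executeByTime:
          - type: WEEKLY
            repeatAt:
              - "03:00"     # HH:MM, 24-hours format
            weekdays:
              - MONDAY
        scheme: ALWAYS_INCREMENTAL
        weeklyBackupDay: MONDAY
    vmSnapshotReattempts:
      - enabled: true
        interval: 5m
        maxAttempts: 5
  providerConfigRef:
    name: default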
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/backup.yandex-cloud.upjet.crossplane.io_policybindings.yaml b/package/crds/backup.yandex-cloud.upjet.crossplane.io_policybindings.yaml new file mode 100644 index 0000000..e36114c --- /dev/null +++ b/package/crds/backup.yandex-cloud.upjet.crossplane.io_policybindings.yaml @@ -0,0 +1,508 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: policybindings.backup.yandex-cloud.upjet.crossplane.io +spec: + group: backup.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PolicyBindings + listKind: PolicyBindingsList + plural: policybindings + singular: policybindings + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PolicyBindings is the Schema for the PolicyBindings API. Allows + binding a compute instance to a backup policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicyBindingsSpec defines the desired state of PolicyBindings + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + instanceId: + description: — Compute Cloud instance ID.
+ type: string + instanceIdRef: + description: Reference to a Instance in compute to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in compute to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyId: + description: — Backup Policy ID. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + instanceId: + description: — Compute Cloud instance ID. + type: string + instanceIdRef: + description: Reference to a Instance in compute to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in compute to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyId: + description: — Backup Policy ID. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.policyId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyId) + || (has(self.initProvider) && has(self.initProvider.policyId))' + status: + description: PolicyBindingsStatus defines the observed state of PolicyBindings. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the Yandex Cloud Policy Bindings. + type: string + enabled: + description: Boolean flag that specifies whether the policy application + is enabled. May be false if processing flag is true. + type: boolean + id: + type: string + instanceId: + description: — Compute Cloud instance ID. + type: string + policyId: + description: — Backup Policy ID. + type: string + processing: + description: Boolean flag that specifies whether the policy is + in the process of binding to an instance. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
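# A hypothetical PolicyBindings manifest matching the schema above. Only
# policyId is enforced by the CEL validation; the instance is supplied here
# through instanceIdRef, which assumes a compute Instance managed resource
# named example-instance exists. The policy ID is an illustrative placeholder.
apiVersion: backup.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: PolicyBindings
metadata:
  name: example-policy-binding
spec:
  forProvider:
    policyId: example-policy-id   # illustrative placeholder, not a real ID
    instanceIdRef:
      name: example-instance      # resolved to instanceId at reconcile time
  providerConfigRef:
    name: default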
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/billing.yandex-cloud.upjet.crossplane.io_cloudbindings.yaml b/package/crds/billing.yandex-cloud.upjet.crossplane.io_cloudbindings.yaml new file mode 100644 index 0000000..de3522f --- /dev/null +++ b/package/crds/billing.yandex-cloud.upjet.crossplane.io_cloudbindings.yaml @@ -0,0 +1,353 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: cloudbindings.billing.yandex-cloud.upjet.crossplane.io +spec: + group: billing.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CloudBinding + listKind: CloudBindingList + plural: cloudbindings + singular: cloudbinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CloudBinding is the Schema for the CloudBindings API. Bind cloud + to billing account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudBindingSpec defines the desired state of CloudBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + billingAccountId: + description: ID of billing account to bind cloud to. + type: string + cloudId: + description: ID of cloud to bind. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + billingAccountId: + description: ID of billing account to bind cloud to. + type: string + cloudId: + description: ID of cloud to bind. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.billingAccountId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.billingAccountId) + || (has(self.initProvider) && has(self.initProvider.billingAccountId))' + - message: spec.forProvider.cloudId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.cloudId) + || (has(self.initProvider) && has(self.initProvider.cloudId))' + status: + description: CloudBindingStatus defines the observed state of CloudBinding. + properties: + atProvider: + properties: + billingAccountId: + description: ID of billing account to bind cloud to. + type: string + cloudId: + description: ID of cloud to bind. + type: string + id: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
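# A hypothetical CloudBinding manifest matching the schema above; both CEL
# validations are satisfied by setting the two required parameters. Setting
# managementPolicies to ["Observe"] instead would observe an existing binding
# without changing it, in which case neither parameter is required. The IDs
# are illustrative placeholders.
apiVersion: billing.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CloudBinding
metadata:
  name: example-cloud-binding
spec:
  forProvider:
    billingAccountId: example-billing-account-id
    cloudId: example-cloud-id
  providerConfigRef:
    name: default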
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cdn.yandex-cloud.upjet.crossplane.io_origingroups.yaml b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_origingroups.yaml new file mode 100644 index 0000000..021c8f5 --- /dev/null +++ b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_origingroups.yaml @@ -0,0 +1,552 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: origingroups.cdn.yandex-cloud.upjet.crossplane.io +spec: + group: cdn.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: OriginGroup + listKind: OriginGroupList + plural: origingroups + singular: origingroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: OriginGroup is the Schema for the OriginGroups API. Allows management + of Yandex.Cloud CDN Origin Groups. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OriginGroupSpec defines the desired state of OriginGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: CDN Origin Group name used to define device. + type: string + origin: + items: + properties: + backup: + type: boolean + enabled: + type: boolean + source: + type: string + type: object + type: array + useNext: + description: If the option is active (has true value), in case + the origin responds with 4XX or 5XX codes, use the next origin + from the list. 
+ type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: CDN Origin Group name used to define device. + type: string + origin: + items: + properties: + backup: + type: boolean + enabled: + type: boolean + source: + type: string + type: object + type: array + useNext: + description: If the option is active (has true value), in case + the origin responds with 4XX or 5XX codes, use the next origin + from the list. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.origin is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.origin) + || (has(self.initProvider) && has(self.initProvider.origin))' + status: + description: OriginGroupStatus defines the observed state of OriginGroup. + properties: + atProvider: + properties: + folderId: + type: string + id: + type: string + name: + description: CDN Origin Group name used to define device. + type: string + origin: + items: + properties: + backup: + type: boolean + enabled: + type: boolean + originGroupId: + type: number + source: + type: string + type: object + type: array + useNext: + description: If the option is active (has true value), in case + the origin responds with 4XX or 5XX codes, use the next origin + from the list. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
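# A hypothetical OriginGroup manifest matching the schema above. name and
# origin are required by the CEL validations; useNext enables falling back to
# the next origin when one responds with a 4XX or 5XX code. The hostnames and
# the referenced Folder are illustrative.
apiVersion: cdn.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: OriginGroup
metadata:
  name: example-origin-group
spec:
  forProvider:
    name: example-origin-group
    useNext: true
    origin:
      - source: origin-one.example.com
        enabled: true
      - source: origin-two.example.com
        backup: true                  # served only when primary origins fail
    folderIdRef:
      name: example-folder            # assumes a resourcemanager Folder resource
  providerConfigRef:
    name: default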
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cdn.yandex-cloud.upjet.crossplane.io_resources.yaml b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_resources.yaml new file mode 100644 index 0000000..d096a5b --- /dev/null +++ b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_resources.yaml @@ -0,0 +1,1184 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: resources.cdn.yandex-cloud.upjet.crossplane.io +spec: + group: cdn.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Resource + listKind: ResourceList + plural: resources + singular: resource + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Resource is the Schema for the Resources API. Allows management + of a Yandex.Cloud CDN Resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceSpec defines the desired state of Resource + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + active: + description: Flag to create Resource either in active or disabled + state. True - the content from CDN is available to clients. + type: boolean + cname: + description: CDN endpoint CNAME, must be unique among resources. + type: string + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: CDN Resource settings and options to tune CDN edge + behavior. 
+ items:
+ properties:
+ allowedHttpMethods:
+ description: 'HTTP methods for your CDN content. By default
+ the following methods are allowed: GET, HEAD, POST, PUT,
+ PATCH, DELETE, OPTIONS. If a method is not allowed for
+ the user, they will get the 405 (Method Not Allowed)
+ response. If the method is not supported, the user gets
+ the 501 (Not Implemented) response.'
+ items:
+ type: string
+ type: array
+ browserCacheSettings:
+ description: 'set up a cache period for the end user''s browser.
+ Content will be cached according to origin settings. If there
+ are no cache settings on your origin, the content will
+ not be cached. The list of HTTP response codes that can
+ be cached in browsers: 200, 201, 204, 206, 301, 302, 303,
+ 304, 307, 308. Other response codes will not be cached.
+ The default value is 4 days.'
+ type: number
+ cacheHttpHeaders:
+ description: list of HTTP headers that must be included in
+ responses to clients.
+ items:
+ type: string
+ type: array
+ cors:
+ description: parameter that lets browsers access selected
+ resources from a domain different from the domain the
+ request was received from.
+ items:
+ type: string
+ type: array
+ customHostHeader:
+ description: custom value for the Host header. Your server
+ must be able to process requests with the chosen header.
+ type: string
+ customServerName:
+ description: wildcard additional CNAME. If a resource has
+ a wildcard additional CNAME, you can use your own certificate
+ for content delivery via HTTPS. Read-only.
+ type: string
+ disableCache:
+ description: set up a cache status.
+ type: boolean
+ disableProxyForceRanges:
+ description: disable proxy force ranges.
+ type: boolean
+ edgeCacheSettings:
+ description: content will be cached according to origin
+ cache settings. The value applies to responses with
+ codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308
+ if the origin server does not have caching HTTP headers.
+ Responses with other codes will not be cached.
+ type: number
+ enableIpUrlSigning:
+ description: enable access limiting by IP addresses; this
+ option is available only when secure_key is set.
+ type: boolean
+ fetchedCompressed:
+ description: option that helps reduce the bandwidth between
+ origin and CDN servers. Content delivery also becomes
+ faster because less time is spent compressing files
+ in the CDN.
+ type: boolean
+ forwardHostHeader:
+ description: choose the Forward Host header option if it is
+ important to send the same Host header in the request to
+ the origin as was sent in the request to the CDN server.
+ type: boolean
+ gzipOn:
+ description: GZip compression at CDN servers reduces file
+ size by 70% and can be as high as 90%.
+ type: boolean
+ ignoreCookie:
+ description: set to ignore cookies.
+ type: boolean
+ ignoreQueryParams:
+ description: files with different query parameters are cached
+ as objects with the same key regardless of the parameter
+ value. Selected by default.
+ type: boolean
+ ipAddressAcl:
+ items:
+ properties:
+ exceptedValues:
+ description: the list of specified IP addresses to
+ be allowed or denied depending on the acl policy type.
+ items:
+ type: string
+ type: array
+ policyType:
+ description: the policy type for the ip_address_acl option,
+ one of "allow" or "deny".
+ type: string
+ type: object
+ type: array
+ proxyCacheMethodsSet:
+ description: allows caching for GET, HEAD and POST requests.
+ type: boolean + queryParamsBlacklist: + description: files with the specified query parameters are + cached as objects with the same key, files with other + parameters are cached as objects with different keys. + items: + type: string + type: array + queryParamsWhitelist: + description: files with the specified query parameters are + cached as objects with different keys, files with other + parameters are cached as objects with the same key. + items: + type: string + type: array + redirectHttpToHttps: + description: set up a redirect from HTTP to HTTPS. + type: boolean + redirectHttpsToHttp: + description: set up a redirect from HTTPS to HTTP. + type: boolean + secureKey: + description: set secure key for url encoding to protect + contect and limit access by IP addresses and time limits. + type: string + slice: + description: files larger than 10 MB will be requested and + cached in parts (no larger than 10 MB each part). It reduces + time to first byte. The origin must support HTTP Range + requests. + type: boolean + staticRequestHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in requests to origins. + type: object + x-kubernetes-map-type: granular + staticResponseHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in response to clients. + type: object + x-kubernetes-map-type: granular + type: object + type: array + originGroupId: + type: number + originGroupIdRef: + description: Reference to a OriginGroup in cdn to populate originGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originGroupIdSelector: + description: Selector for a OriginGroup in cdn to populate originGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + originGroupName: + type: string + originProtocol: + type: string + secondaryHostnames: + description: list of secondary hostname strings. + items: + type: string + type: array + x-kubernetes-list-type: set + sslCertificate: + description: SSL certificate of CDN resource. + items: + properties: + certificateManagerId: + type: string + type: + type: string + type: object + type: array + updatedAt: + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + active: + description: Flag to create Resource either in active or disabled + state. True - the content from CDN is available to clients. + type: boolean + cname: + description: CDN endpoint CNAME, must be unique among resources. + type: string + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: CDN Resource settings and options to tune CDN edge + behavior. + items: + properties: + allowedHttpMethods: + description: 'HTTP methods for your CDN content. By default + the following methods are allowed: GET, HEAD, POST, PUT, + PATCH, DELETE, OPTIONS. In case some methods are not allowed + to the user, they will get the 405 (Method Not Allowed) + response. If the method is not supported, the user gets + the 501 (Not Implemented) response.' + items: + type: string + type: array + browserCacheSettings: + description: 'set up a cache period for the end-users browser. + Content will be cached due to origin settings. If there + are no cache settings on your origin, the content will + not be cached. The list of HTTP response codes that can + be cached in browsers: 200, 201, 204, 206, 301, 302, 303, + 304, 307, 308. Other response codes will not be cached. + The default value is 4 days.' + type: number + cacheHttpHeaders: + description: list HTTP headers that must be included in + responses to clients. + items: + type: string + type: array + cors: + description: parameter that lets browsers get access to + selected resources from a domain different to a domain + from which the request is received. + items: + type: string + type: array + customHostHeader: + description: custom value for the Host header. Your server + must be able to process requests with the chosen header. + type: string + customServerName: + description: wildcard additional CNAME. If a resource has + a wildcard additional CNAME, you can use your own certificate + for content delivery via HTTPS. Read-only. + type: string + disableCache: + description: setup a cache status. + type: boolean + disableProxyForceRanges: + description: disabling proxy force ranges. + type: boolean + edgeCacheSettings: + description: content will be cached according to origin + cache settings. The value applies for a response with + codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 + if an origin server does not have caching HTTP headers. + Responses with other codes will not be cached. + type: number + enableIpUrlSigning: + description: enable access limiting by IP addresses, option + available only with setting secure_key. + type: boolean + fetchedCompressed: + description: option helps you to reduce the bandwidth between + origin and CDN servers. Also, content delivery speed becomes + higher because of reducing the time for compressing files + in a CDN. + type: boolean + forwardHostHeader: + description: choose the Forward Host header option if is + important to send in the request to the Origin the same + Host header as was sent in the request to CDN server. + type: boolean + gzipOn: + description: GZip compression at CDN servers reduces file + size by 70% and can be as high as 90%. + type: boolean + ignoreCookie: + description: set for ignoring cookie. + type: boolean + ignoreQueryParams: + description: files with different query parameters are cached + as objects with the same key regardless of the parameter + value. selected by default. + type: boolean + ipAddressAcl: + items: + properties: + exceptedValues: + description: the list of specified IP addresses to + be allowed or denied depending on acl policy type. 
+ items: + type: string + type: array + policyType: + description: the policy type for ip_address_acl option, + one of "allow" or "deny" values. + type: string + type: object + type: array + proxyCacheMethodsSet: + description: allows caching for GET, HEAD and POST requests. + type: boolean + queryParamsBlacklist: + description: files with the specified query parameters are + cached as objects with the same key, files with other + parameters are cached as objects with different keys. + items: + type: string + type: array + queryParamsWhitelist: + description: files with the specified query parameters are + cached as objects with different keys, files with other + parameters are cached as objects with the same key. + items: + type: string + type: array + redirectHttpToHttps: + description: set up a redirect from HTTP to HTTPS. + type: boolean + redirectHttpsToHttp: + description: set up a redirect from HTTPS to HTTP. + type: boolean + secureKey: + description: set secure key for url encoding to protect + contect and limit access by IP addresses and time limits. + type: string + slice: + description: files larger than 10 MB will be requested and + cached in parts (no larger than 10 MB each part). It reduces + time to first byte. The origin must support HTTP Range + requests. + type: boolean + staticRequestHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in requests to origins. + type: object + x-kubernetes-map-type: granular + staticResponseHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in response to clients. + type: object + x-kubernetes-map-type: granular + type: object + type: array + originGroupId: + type: number + originGroupIdRef: + description: Reference to a OriginGroup in cdn to populate originGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originGroupIdSelector: + description: Selector for a OriginGroup in cdn to populate originGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + originGroupName: + type: string + originProtocol: + type: string + secondaryHostnames: + description: list of secondary hostname strings. + items: + type: string + type: array + x-kubernetes-list-type: set + sslCertificate: + description: SSL certificate of CDN resource. + items: + properties: + certificateManagerId: + type: string + type: + type: string + type: object + type: array + updatedAt: + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties:
+ configRef:
+ default:
+ name: default
+ description: |-
+ SecretStoreConfigRef specifies which secret store config should be used
+ for this ConnectionSecret.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ metadata:
+ description: Metadata is the metadata for connection secret.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations are the annotations to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.annotations".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Labels are the labels/tags to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.labels".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ type:
+ description: |-
+ Type is the SecretType for the connection secret.
+ - Only valid for Kubernetes Secret Stores.
+ type: string
+ type: object
+ name:
+ description: Name is the name of the connection secret.
+ type: string
+ required:
+ - name
+ type: object
+ writeConnectionSecretToRef:
+ description: |-
+ WriteConnectionSecretToReference specifies the namespace and name of a
+ Secret to which any connection details for this managed resource should
+ be written. Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ This field is planned to be replaced in a future release in favor of
+ PublishConnectionDetailsTo. Currently, both could be set independently
+ and connection details would be published to both without affecting
+ each other.
+ properties:
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ required:
+ - forProvider
+ type: object
+ status:
+ description: ResourceStatus defines the observed state of Resource.
+ properties:
+ atProvider:
+ properties:
+ active:
+ description: Flag to create Resource either in active or disabled
+ state. True - the content from CDN is available to clients.
+ type: boolean
+ cname:
+ description: CDN endpoint CNAME, must be unique among resources.
+ type: string
+ createdAt:
+ description: Creation timestamp of the CDN resource.
+ type: string
+ folderId:
+ type: string
+ id:
+ type: string
+ options:
+ description: CDN Resource settings and options to tune CDN edge
+ behavior.
+ items:
+ properties:
+ allowedHttpMethods:
+ description: 'HTTP methods for your CDN content.
By default + the following methods are allowed: GET, HEAD, POST, PUT, + PATCH, DELETE, OPTIONS. In case some methods are not allowed + to the user, they will get the 405 (Method Not Allowed) + response. If the method is not supported, the user gets + the 501 (Not Implemented) response.' + items: + type: string + type: array + browserCacheSettings: + description: 'set up a cache period for the end-users browser. + Content will be cached due to origin settings. If there + are no cache settings on your origin, the content will + not be cached. The list of HTTP response codes that can + be cached in browsers: 200, 201, 204, 206, 301, 302, 303, + 304, 307, 308. Other response codes will not be cached. + The default value is 4 days.' + type: number + cacheHttpHeaders: + description: list HTTP headers that must be included in + responses to clients. + items: + type: string + type: array + cors: + description: parameter that lets browsers get access to + selected resources from a domain different to a domain + from which the request is received. + items: + type: string + type: array + customHostHeader: + description: custom value for the Host header. Your server + must be able to process requests with the chosen header. + type: string + customServerName: + description: wildcard additional CNAME. If a resource has + a wildcard additional CNAME, you can use your own certificate + for content delivery via HTTPS. Read-only. + type: string + disableCache: + description: setup a cache status. + type: boolean + disableProxyForceRanges: + description: disabling proxy force ranges. + type: boolean + edgeCacheSettings: + description: content will be cached according to origin + cache settings. The value applies for a response with + codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 + if an origin server does not have caching HTTP headers. + Responses with other codes will not be cached. + type: number + enableIpUrlSigning: + description: enable access limiting by IP addresses, option + available only with setting secure_key. + type: boolean + fetchedCompressed: + description: option helps you to reduce the bandwidth between + origin and CDN servers. Also, content delivery speed becomes + higher because of reducing the time for compressing files + in a CDN. + type: boolean + forwardHostHeader: + description: choose the Forward Host header option if is + important to send in the request to the Origin the same + Host header as was sent in the request to CDN server. + type: boolean + gzipOn: + description: GZip compression at CDN servers reduces file + size by 70% and can be as high as 90%. + type: boolean + ignoreCookie: + description: set for ignoring cookie. + type: boolean + ignoreQueryParams: + description: files with different query parameters are cached + as objects with the same key regardless of the parameter + value. selected by default. + type: boolean + ipAddressAcl: + items: + properties: + exceptedValues: + description: the list of specified IP addresses to + be allowed or denied depending on acl policy type. + items: + type: string + type: array + policyType: + description: the policy type for ip_address_acl option, + one of "allow" or "deny" values. + type: string + type: object + type: array + proxyCacheMethodsSet: + description: allows caching for GET, HEAD and POST requests. + type: boolean + queryParamsBlacklist: + description: files with the specified query parameters are + cached as objects with the same key, files with other + parameters are cached as objects with different keys. 
+ items: + type: string + type: array + queryParamsWhitelist: + description: files with the specified query parameters are + cached as objects with different keys, files with other + parameters are cached as objects with the same key. + items: + type: string + type: array + redirectHttpToHttps: + description: set up a redirect from HTTP to HTTPS. + type: boolean + redirectHttpsToHttp: + description: set up a redirect from HTTPS to HTTP. + type: boolean + secureKey: + description: set secure key for url encoding to protect + contect and limit access by IP addresses and time limits. + type: string + slice: + description: files larger than 10 MB will be requested and + cached in parts (no larger than 10 MB each part). It reduces + time to first byte. The origin must support HTTP Range + requests. + type: boolean + staticRequestHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in requests to origins. + type: object + x-kubernetes-map-type: granular + staticResponseHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in response to clients. + type: object + x-kubernetes-map-type: granular + type: object + type: array + originGroupId: + type: number + originGroupName: + type: string + originProtocol: + type: string + providerCname: + description: provider CNAME of CDN resource, computed value for + read and update operations. + type: string + secondaryHostnames: + description: list of secondary hostname strings. + items: + type: string + type: array + x-kubernetes-list-type: set + sslCertificate: + description: SSL certificate of CDN resource. + items: + properties: + certificateManagerId: + type: string + status: + type: string + type: + type: string + type: object + type: array + updatedAt: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cm.yandex-cloud.upjet.crossplane.io_certificates.yaml b/package/crds/cm.yandex-cloud.upjet.crossplane.io_certificates.yaml new file mode 100644 index 0000000..82f3c8a --- /dev/null +++ b/package/crds/cm.yandex-cloud.upjet.crossplane.io_certificates.yaml @@ -0,0 +1,784 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: certificates.cm.yandex-cloud.upjet.crossplane.io +spec: + group: cm.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Certificate + listKind: CertificateList + plural: certificates + singular: certificate + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Certificate is the Schema for the Certificates API. A TLS certificate + signed by a certification authority confirming that it belongs to the owner + of the domain name. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deletionProtection: + type: boolean + description: + description: Certificate description. + type: string + domains: + description: Domains for this certificate. Should be specified + for managed certificates. + items: + type: string + type: array + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this certificate. + type: object + x-kubernetes-map-type: granular + managed: + description: Managed specification. Structure is documented below. + items: + properties: + challengeCount: + description: . Expected number of challenge count needed + to validate certificate. Resource creation will fail if + the specified value does not match the actual number of + challenges received from issue provider. This argument + is helpful for safe automatic resource creation for passing + challenges for multi-domain certificates. + type: number + challengeType: + description: 'Domain owner-check method. Possible values:' + type: string + type: object + type: array + name: + description: Certificate name. + type: string + selfManaged: + description: Self-managed specification. Structure is documented + below. + items: + properties: + certificate: + description: Certificate with chain. + type: string + privateKeyLockboxSecret: + description: Lockbox secret specification for getting private + key. Structure is documented below. + items: + properties: + id: + description: Lockbox secret Id. + type: string + key: + description: Key of the Lockbox secret, the value + of which contains the private key of the certificate. + type: string + type: object + type: array + privateKeySecretRef: + description: Private key of certificate. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deletionProtection: + type: boolean + description: + description: Certificate description. + type: string + domains: + description: Domains for this certificate. Should be specified + for managed certificates. + items: + type: string + type: array + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this certificate. + type: object + x-kubernetes-map-type: granular + managed: + description: Managed specification. Structure is documented below. + items: + properties: + challengeCount: + description: . Expected number of challenge count needed + to validate certificate. Resource creation will fail if + the specified value does not match the actual number of + challenges received from issue provider. This argument + is helpful for safe automatic resource creation for passing + challenges for multi-domain certificates. + type: number + challengeType: + description: 'Domain owner-check method. Possible values:' + type: string + type: object + type: array + name: + description: Certificate name. + type: string + selfManaged: + description: Self-managed specification. Structure is documented + below. + items: + properties: + certificate: + description: Certificate with chain. + type: string + privateKeyLockboxSecret: + description: Lockbox secret specification for getting private + key. Structure is documented below. + items: + properties: + id: + description: Lockbox secret Id. + type: string + key: + description: Key of the Lockbox secret, the value + of which contains the private key of the certificate. + type: string + type: object + type: array + privateKeySecretRef: + description: Private key of certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CertificateStatus defines the observed state of Certificate. + properties: + atProvider: + properties: + challenges: + description: Array of challenges. Structure is documented below. + items: + properties: + createdAt: + description: Time the challenge was created. + type: string + dnsName: + description: DNS record name (only for DNS challenge). + type: string + dnsType: + description: 'DNS record type: "TXT" or "CNAME" (only for + DNS challenge).' + type: string + dnsValue: + description: DNS record value (only for DNS challenge). + type: string + domain: + description: Validated domain. + type: string + httpContent: + description: The content that should be made accessible + with the given http_url (only for HTTP challenge). + type: string + httpUrl: + description: URL where the challenge content http_content + should be placed (only for HTTP challenge). + type: string + message: + description: Current status message. + type: string + type: + description: Challenge type "DNS" or "HTTP". + type: string + updatedAt: + description: Last time the challenge was updated. + type: string + type: object + type: array + createdAt: + description: Certificate create timestamp. + type: string + deletionProtection: + type: boolean + description: + description: Certificate description. + type: string + domains: + description: Domains for this certificate. Should be specified + for managed certificates. + items: + type: string + type: array + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + id: + description: Certificate Id. + type: string + issuedAt: + description: Certificate issue timestamp. + type: string + issuer: + description: Certificate issuer. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this certificate. + type: object + x-kubernetes-map-type: granular + managed: + description: Managed specification. Structure is documented below. + items: + properties: + challengeCount: + description: . Expected number of challenge count needed + to validate certificate. Resource creation will fail if + the specified value does not match the actual number of + challenges received from issue provider. This argument + is helpful for safe automatic resource creation for passing + challenges for multi-domain certificates. + type: number + challengeType: + description: 'Domain owner-check method. Possible values:' + type: string + type: object + type: array + name: + description: Certificate name. + type: string + notAfter: + description: Certificate end valid period. + type: string + notBefore: + description: Certificate start valid period. + type: string + selfManaged: + description: Self-managed specification. Structure is documented + below. + items: + properties: + certificate: + description: Certificate with chain. + type: string + privateKeyLockboxSecret: + description: Lockbox secret specification for getting private + key. Structure is documented below. 
+ items: + properties: + id: + description: Lockbox secret Id. + type: string + key: + description: Key of the Lockbox secret, the value + of which contains the private key of the certificate. + type: string + type: object + type: array + type: object + type: array + serial: + description: Certificate serial number. + type: string + status: + description: 'Certificate status: "VALIDATING", "INVALID", "ISSUED", + "REVOKED", "RENEWING" or "RENEWAL_FAILED".' + type: string + subject: + description: Certificate subject. + type: string + type: + description: 'Certificate type: "MANAGED" or "IMPORTED".' + type: string + updatedAt: + description: Certificate update timestamp. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskiambindings.yaml new file mode 100644 index 0000000..2376138 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskiambindings.yaml @@ -0,0 +1,384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: diskiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DiskIAMBinding + listKind: DiskIAMBindingList + plural: diskiambindings + singular: diskiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DiskIAMBinding is the Schema for the DiskIAMBindings API. Allows + management of a single IAM binding for a Disk. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskIAMBindingSpec defines the desired state of DiskIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + diskId: + description: ID of the disk to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_disk_iam_binding + can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + diskId: + description: ID of the disk to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_disk_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.diskId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.diskId) + || (has(self.initProvider) && has(self.initProvider.diskId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: DiskIAMBindingStatus defines the observed state of DiskIAMBinding. + properties: + atProvider: + properties: + diskId: + description: ID of the disk to attach the policy to. + type: string + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_disk_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
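For reference, a DiskIAMBinding that satisfies all three CEL validations above (diskId, members, and role are required under the default management policies) could look like the sketch below. The disk ID, role, and member values are placeholders, and the serviceAccount: prefix follows the usual Yandex IAM member format rather than anything enumerated in this hunk.

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: DiskIAMBinding
metadata:
  name: example-disk-viewers
spec:
  forProvider:
    diskId: fhm-example-disk-id           # placeholder ID of an existing disk
    role: viewer                          # only one binding may exist per role
    members:
      - serviceAccount:aje-example-sa-id  # placeholder identity
  providerConfigRef:
    name: default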
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroupiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroupiambindings.yaml new file mode 100644 index 0000000..6d61055 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroupiambindings.yaml @@ -0,0 +1,389 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: diskplacementgroupiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DiskPlacementGroupIAMBinding + listKind: DiskPlacementGroupIAMBindingList + plural: diskplacementgroupiambindings + singular: diskplacementgroupiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DiskPlacementGroupIAMBinding is the Schema for the DiskPlacementGroupIAMBindings + API. Allows management of a single IAM binding for a Disk Placement Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskPlacementGroupIAMBindingSpec defines the desired state + of DiskPlacementGroupIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + diskPlacementGroupId: + description: ID of the disk placement group to attach the policy + to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. 
Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_disk_placement_group_iam_binding + can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + diskPlacementGroupId: + description: ID of the disk placement group to attach the policy + to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_disk_placement_group_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.diskPlacementGroupId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.diskPlacementGroupId) + || (has(self.initProvider) && has(self.initProvider.diskPlacementGroupId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: DiskPlacementGroupIAMBindingStatus defines the observed state + of DiskPlacementGroupIAMBinding. + properties: + atProvider: + properties: + diskPlacementGroupId: + description: ID of the disk placement group to attach the policy + to. + type: string + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_disk_placement_group_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
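The same pattern applies to DiskPlacementGroupIAMBinding, with diskPlacementGroupId in place of diskId; again, every value below is a placeholder:

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: DiskPlacementGroupIAMBinding
metadata:
  name: example-dpg-editors
spec:
  forProvider:
    diskPlacementGroupId: fv4-example-dpg-id  # placeholder ID of an existing placement group
    role: editor                              # only one binding may exist per role
    members:
      - serviceAccount:aje-example-sa-id      # placeholder identity
  providerConfigRef:
    name: default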
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroups.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroups.yaml new file mode 100644 index 0000000..9bb472e --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroups.yaml @@ -0,0 +1,546 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: diskplacementgroups.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DiskPlacementGroup + listKind: DiskPlacementGroupList + plural: diskplacementgroups + singular: diskplacementgroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DiskPlacementGroup is the Schema for the DiskPlacementGroups + API. Manages a Disk Placement Group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskPlacementGroupSpec defines the desired state of DiskPlacementGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the Disk Placement Group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Disk + Placement Group. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Disk Placement Group. + type: string + zone: + description: ID of the zone where the Disk Placement Group resides. + Default is ru-central1-b + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the Disk Placement Group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Disk + Placement Group. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Disk Placement Group. + type: string + zone: + description: ID of the zone where the Disk Placement Group resides. + Default is ru-central1-b + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DiskPlacementGroupStatus defines the observed state of DiskPlacementGroup. + properties: + atProvider: + properties: + createdAt: + type: string + description: + description: A description of the Disk Placement Group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Disk + Placement Group. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Disk Placement Group. + type: string + status: + description: Status of the Disk Placement Group. + type: string + zone: + description: ID of the zone where the Disk Placement Group resides. + Default is ru-central1-b + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
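Unlike the IAM binding kinds, DiskPlacementGroup declares no x-kubernetes-validations, so nothing in forProvider is strictly required; per the schema above, zone falls back to ru-central1-b when omitted. A sketch with illustrative values:

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: DiskPlacementGroup
metadata:
  name: example-dpg
spec:
  forProvider:
    name: example-dpg
    description: Example placement group
    zone: ru-central1-b   # matches the documented default
    labels:
      env: dev
  providerConfigRef:
    name: default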
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_disks.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_disks.yaml new file mode 100644 index 0000000..e1bdb83 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_disks.yaml @@ -0,0 +1,1038 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: disks.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Disk + listKind: DiskList + plural: disks + singular: disk + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Disk is the Schema for the Disks API. Persistent disks are durable + storage devices that function similarly to the physical disks in a desktop + or a server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskSpec defines the desired state of Disk + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowRecreate: + description: Default is 5 minutes. + type: boolean + blockSize: + description: Block size of the disk, specified in bytes. + type: number + description: + description: Description of the disk. Provide this property when + you create a resource. + type: string + diskPlacementPolicy: + description: Disk placement policy configuration. The structure + is documented below. + items: + properties: + diskPlacementGroupId: + description: Specifies Disk Placement Group id. 
+ type: string + diskPlacementGroupIdRef: + description: Reference to a DiskPlacementGroup in compute + to populate diskPlacementGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + diskPlacementGroupIdSelector: + description: Selector for a DiskPlacementGroup in compute + to populate diskPlacementGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + folderId: + description: The ID of the folder that the disk belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hardwareGeneration: + description: |- + Hardware generation and its features, + which will be applied to the instance when this disk is used as a boot + disk. Provide this property if you wish to override this value, which + otherwise is inherited from the source. The structure is documented below. + items: + properties: + generation2Features: + description: A newer hardware generation, which always uses + PCI_TOPOLOGY_V2 and UEFI boot. + items: + type: object + type: array + legacyFeatures: + description: 'Defines the first known hardware generation + and its features, which are:' + items: + properties: + pciTopology: + description: A variant of PCI topology, one of PCI_TOPOLOGY_V1 + or PCI_TOPOLOGY_V2. + type: string + type: object + type: array + type: object + type: array + imageId: + description: The source image to use for disk creation. + type: string + imageIdRef: + description: Reference to a Image to populate imageId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageIdSelector: + description: Selector for a Image to populate imageId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this disk. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the disk. Provide this property when you + create a resource. + type: string + size: + description: Size of the persistent disk, specified in GB. You + can specify this field when creating a persistent disk using + the image_id or snapshot_id parameter, or specify it alone to + create an empty persistent disk. If you specify this field along + with image_id or snapshot_id, the size value must not be less + than the size of the source image or the size of the snapshot. + type: number + snapshotId: + description: The source snapshot to use for disk creation. + type: string + type: + description: Type of disk to create. Provide this when creating + a disk. + type: string + zone: + description: Availability zone where the disk will reside. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowRecreate: + description: Default is 5 minutes. + type: boolean + blockSize: + description: Block size of the disk, specified in bytes. + type: number + description: + description: Description of the disk. Provide this property when + you create a resource. + type: string + diskPlacementPolicy: + description: Disk placement policy configuration. The structure + is documented below. + items: + properties: + diskPlacementGroupId: + description: Specifies Disk Placement Group id. + type: string + diskPlacementGroupIdRef: + description: Reference to a DiskPlacementGroup in compute + to populate diskPlacementGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + diskPlacementGroupIdSelector: + description: Selector for a DiskPlacementGroup in compute + to populate diskPlacementGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + folderId: + description: The ID of the folder that the disk belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hardwareGeneration: + description: |- + Hardware generation and its features, + which will be applied to the instance when this disk is used as a boot + disk. Provide this property if you wish to override this value, which + otherwise is inherited from the source. The structure is documented below. + items: + properties: + generation2Features: + description: A newer hardware generation, which always uses + PCI_TOPOLOGY_V2 and UEFI boot. + items: + type: object + type: array + legacyFeatures: + description: 'Defines the first known hardware generation + and its features, which are:' + items: + properties: + pciTopology: + description: A variant of PCI topology, one of PCI_TOPOLOGY_V1 + or PCI_TOPOLOGY_V2. + type: string + type: object + type: array + type: object + type: array + imageId: + description: The source image to use for disk creation. + type: string + imageIdRef: + description: Reference to a Image to populate imageId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageIdSelector: + description: Selector for a Image to populate imageId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this disk. A list of key/value + pairs. 
+ type: object + x-kubernetes-map-type: granular + name: + description: Name of the disk. Provide this property when you + create a resource. + type: string + size: + description: Size of the persistent disk, specified in GB. You + can specify this field when creating a persistent disk using + the image_id or snapshot_id parameter, or specify it alone to + create an empty persistent disk. If you specify this field along + with image_id or snapshot_id, the size value must not be less + than the size of the source image or the size of the snapshot. + type: number + snapshotId: + description: The source snapshot to use for disk creation. + type: string + type: + description: Type of disk to create. Provide this when creating + a disk. + type: string + zone: + description: Availability zone where the disk will reside. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DiskStatus defines the observed state of Disk. + properties: + atProvider: + properties: + allowRecreate: + description: Default is 5 minutes. + type: boolean + blockSize: + description: Block size of the disk, specified in bytes. + type: number + createdAt: + description: Creation timestamp of the disk. + type: string + description: + description: Description of the disk. Provide this property when + you create a resource. + type: string + diskPlacementPolicy: + description: Disk placement policy configuration. The structure + is documented below. + items: + properties: + diskPlacementGroupId: + description: Specifies Disk Placement Group id. 
+ type: string + type: object + type: array + folderId: + description: The ID of the folder that the disk belongs to. If + it is not provided, the default provider folder is used. + type: string + hardwareGeneration: + description: |- + Hardware generation and its features, + which will be applied to the instance when this disk is used as a boot + disk. Provide this property if you wish to override this value, which + otherwise is inherited from the source. The structure is documented below. + items: + properties: + generation2Features: + description: A newer hardware generation, which always uses + PCI_TOPOLOGY_V2 and UEFI boot. + items: + type: object + type: array + legacyFeatures: + description: 'Defines the first known hardware generation + and its features, which are:' + items: + properties: + pciTopology: + description: A variant of PCI topology, one of PCI_TOPOLOGY_V1 + or PCI_TOPOLOGY_V2. + type: string + type: object + type: array + type: object + type: array + id: + type: string + imageId: + description: The source image to use for disk creation. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this disk. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the disk. Provide this property when you + create a resource. + type: string + productIds: + items: + type: string + type: array + size: + description: Size of the persistent disk, specified in GB. You + can specify this field when creating a persistent disk using + the image_id or snapshot_id parameter, or specify it alone to + create an empty persistent disk. If you specify this field along + with image_id or snapshot_id, the size value must not be less + than the size of the source image or the size of the snapshot. + type: number + snapshotId: + description: The source snapshot to use for disk creation. + type: string + status: + description: The status of the disk. + type: string + type: + description: Type of disk to create. Provide this when creating + a disk. + type: string + zone: + description: Availability zone where the disk will reside. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystemiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystemiambindings.yaml new file mode 100644 index 0000000..c1ed57d --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystemiambindings.yaml @@ -0,0 +1,385 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: filesystemiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: FilesystemIAMBinding + listKind: FilesystemIAMBindingList + plural: filesystemiambindings + singular: filesystemiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: FilesystemIAMBinding is the Schema for the FilesystemIAMBindings + API. Allows management of a single IAM binding for a Filesystem. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FilesystemIAMBindingSpec defines the desired state of FilesystemIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
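The Disk CRD above resolves every cloud ID through Crossplane's cross-resource referencing: a field such as `imageId` may be set literally, populated from a named object via `imageIdRef`, or matched by labels via `imageIdSelector`, with `policy.resolution` (Required/Optional) deciding whether an unresolved reference fails the reconcile and `policy.resolve` (IfNotPresent/Always) deciding how often resolution re-runs. A minimal sketch of a manifest against this schema follows; the resource names, labels, and zone are illustrative assumptions, not values taken from this diff:

```yaml
apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Disk
metadata:
  name: example-boot-disk      # hypothetical name
spec:
  forProvider:
    zone: ru-central1-a        # assumed zone
    size: 20                   # GB; must not be less than the source image size
    # Resolve imageId from a managed Image object by name instead of hardcoding it.
    imageIdRef:
      name: example-image      # hypothetical Image resource
      policy:
        resolution: Required   # reconcile fails until the reference resolves
        resolve: IfNotPresent  # resolve once, then leave the field alone
    # Resolve folderId by label selector rather than by name.
    folderIdSelector:
      matchLabels:
        team: platform         # hypothetical label
  providerConfigRef:
    name: default              # matches the schema default
```

Each ID field accepts either the `...Ref` or the `...Selector` form; the selector form combined with `matchControllerRef: true` is what lets a composition wire a Disk to an Image created under the same composite owner.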
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + filesystemId: + description: ID of the filesystem to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_filesystem_iam_binding + can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + filesystemId: + description: ID of the filesystem to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_filesystem_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. 
+ properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.filesystemId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.filesystemId) + || (has(self.initProvider) && has(self.initProvider.filesystemId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: FilesystemIAMBindingStatus defines the observed state of + FilesystemIAMBinding. + properties: + atProvider: + properties: + filesystemId: + description: ID of the filesystem to attach the policy to. + type: string + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_filesystem_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystems.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystems.yaml new file mode 100644 index 0000000..5c94e45 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystems.yaml @@ -0,0 +1,581 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: filesystems.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Filesystem + listKind: FilesystemList + plural: filesystems + singular: filesystem + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Filesystem is the Schema for the Filesystems API. File storage + is a virtual file system that can be attached to multiple Compute Cloud + VMs in the same availability zone. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FilesystemSpec defines the desired state of Filesystem + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + blockSize: + description: Block size of the filesystem, specified in bytes. + type: number + description: + description: Description of the filesystem. Provide this property + when you create a resource. + type: string + folderId: + description: The ID of the folder that the filesystem belongs + to. If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. 
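Unlike the Disk CRD, the FilesystemIAMBinding CRD enforces its required inputs with CEL rules (`x-kubernetes-validations`) rather than a plain `required:` list: `filesystemId`, `members`, and `role` must be present in `forProvider` or `initProvider` only when `managementPolicies` includes `'*'`, `Create`, or `Update`. A sketch of a conventional fully-managed binding; the filesystem ID, role, and member strings below are placeholders in the format the field descriptions suggest, not values from this diff:

```yaml
apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: FilesystemIAMBinding
metadata:
  name: fs-viewers-example           # hypothetical name
spec:
  forProvider:
    filesystemId: fhmexample123      # hypothetical filesystem ID
    role: compute.viewer             # assumed role; only one binding object per role
    members:                         # treated as a set (x-kubernetes-list-type: set)
      - serviceAccount:ajeexample456 # member entry format is an assumption
```

Dropping `Create` and `Update` from `managementPolicies` (for example `managementPolicies: [Observe]`) lets the same manifest pass validation with an empty `forProvider`, which is the observe-only pattern those CEL guards are written to permit.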
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this filesystem. A list of key/value + pairs. For details about the concept, see documentation. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the filesystem. Provide this property when + you create a resource. + type: string + size: + description: Size of the filesystem, specified in GB. + type: number + type: + description: Type of filesystem to create. Type network-hdd is + set by default. + type: string + zone: + description: Availability zone where the filesystem will reside. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + blockSize: + description: Block size of the filesystem, specified in bytes. 
+ type: number + description: + description: Description of the filesystem. Provide this property + when you create a resource. + type: string + folderId: + description: The ID of the folder that the filesystem belongs + to. If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this filesystem. A list of key/value + pairs. For details about the concept, see documentation. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the filesystem. Provide this property when + you create a resource. + type: string + size: + description: Size of the filesystem, specified in GB. + type: number + type: + description: Type of filesystem to create. Type network-hdd is + set by default. + type: string + zone: + description: Availability zone where the filesystem will reside. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: FilesystemStatus defines the observed state of Filesystem. + properties: + atProvider: + properties: + blockSize: + description: Block size of the filesystem, specified in bytes. + type: number + createdAt: + description: Creation timestamp of the filesystem. + type: string + description: + description: Description of the filesystem. Provide this property + when you create a resource. + type: string + folderId: + description: The ID of the folder that the filesystem belongs + to. If it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this filesystem. A list of key/value + pairs. For details about the concept, see documentation. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the filesystem. Provide this property when + you create a resource. + type: string + size: + description: Size of the filesystem, specified in GB. + type: number + status: + description: The status of the filesystem. + type: string + type: + description: Type of filesystem to create. Type network-hdd is + set by default. + type: string + zone: + description: Availability zone where the filesystem will reside. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusteriambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusteriambindings.yaml new file mode 100644 index 0000000..b5facab --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusteriambindings.yaml @@ -0,0 +1,385 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: gpuclusteriambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: GpuClusterIAMBinding + listKind: GpuClusterIAMBindingList + plural: gpuclusteriambindings + singular: gpuclusteriambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: GpuClusterIAMBinding is the Schema for the GpuClusterIAMBindings + API. Allows management of a single IAM binding for a GPU Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
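The Filesystem spec carries the same reference machinery plus the `initProvider` split its schema describes: `initProvider` fields are merged into `forProvider` at creation and then added to the Terraform `ignore_changes` hook. A sketch that uses this to set an initial size without fighting later out-of-band resizes; the names and values are assumptions, not values from this diff:

```yaml
apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Filesystem
metadata:
  name: shared-fs-example     # hypothetical name
spec:
  forProvider:
    zone: ru-central1-a       # assumed zone; attaching VMs must live in the same zone
    type: network-ssd         # the schema notes network-hdd is the default when omitted
    folderIdRef:
      name: example-folder    # hypothetical resourcemanager Folder object
  initProvider:
    size: 100                 # GB at creation; drift on this field is then ignored
```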
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GpuClusterIAMBindingSpec defines the desired state of GpuClusterIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + gpuClusterId: + description: ID of the gpu cluster to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_gpu_cluster_iam_binding + can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + gpuClusterId: + description: ID of the gpu cluster to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_gpu_cluster_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.gpuClusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gpuClusterId) + || (has(self.initProvider) && has(self.initProvider.gpuClusterId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: GpuClusterIAMBindingStatus defines the observed state of + GpuClusterIAMBinding. + properties: + atProvider: + properties: + gpuClusterId: + description: ID of the gpu cluster to attach the policy to. + type: string + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_gpu_cluster_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusters.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusters.yaml new file mode 100644 index 0000000..cc56b31 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusters.yaml @@ -0,0 +1,566 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: gpuclusters.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: GpuCluster + listKind: GpuClusterList + plural: gpuclusters + singular: gpucluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: GpuCluster is the Schema for the GpuClusters API. GPU Cluster + connects multiple Compute GPU Instances in the same availability zone with + high-speed low-latency network. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GpuClusterSpec defines the desired state of GpuCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
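For orientation, here is a minimal GpuClusterIAMBinding manifest that satisfies the schema above, including the CEL validation rules requiring gpuClusterId, members, and role when Create or Update is in scope. This is a sketch: the cluster ID, role, and member values are hypothetical placeholders, not values taken from this changeset.

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: GpuClusterIAMBinding
metadata:
  name: gpu-cluster-viewers           # hypothetical object name
spec:
  forProvider:
    gpuClusterId: fv4abcdefexample    # hypothetical GPU cluster ID
    role: viewer                      # hypothetical role; only one binding per role
    members:                          # treated as a set (x-kubernetes-list-type: set)
      - userAccount:example-user-id   # hypothetical identity
  providerConfigRef:
    name: default                     # schema default, shown explicitly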
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the GPU cluster. Provide this property + when you create a resource. + type: string + folderId: + description: The ID of the folder that the GPU cluster belongs + to. If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + interconnectType: + description: Type of interconnect between nodes to use in GPU + cluster. Type infiniband is set by default, and it is the only + one available at the moment. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this GPU cluster. A list of key/value + pairs. For details about the concept, see documentation. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the GPU cluster. Provide this property when + you create a resource. 
+ type: string + zone: + description: Availability zone where the GPU cluster will reside. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the GPU cluster. Provide this property + when you create a resource. + type: string + folderId: + description: The ID of the folder that the GPU cluster belongs + to. If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + interconnectType: + description: Type of interconnect between nodes to use in GPU + cluster. Type infiniband is set by default, and it is the only + one available at the moment. 
+ type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this GPU cluster. A list of key/value + pairs. For details about the concept, see documentation. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the GPU cluster. Provide this property when + you create a resource. + type: string + zone: + description: Availability zone where the GPU cluster will reside. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: GpuClusterStatus defines the observed state of GpuCluster. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the GPU cluster. + type: string + description: + description: Description of the GPU cluster. Provide this property + when you create a resource. + type: string + folderId: + description: The ID of the folder that the GPU cluster belongs + to. If it is not provided, the default provider folder is used. + type: string + id: + type: string + interconnectType: + description: Type of interconnect between nodes to use in GPU + cluster. Type infiniband is set by default, and it is the only + one available at the moment. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this GPU cluster. A list of key/value + pairs. For details about the concept, see documentation. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the GPU cluster. Provide this property when + you create a resource. + type: string + status: + description: The status of the GPU cluster. + type: string + zone: + description: Availability zone where the GPU cluster will reside. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_imageiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_imageiambindings.yaml new file mode 100644 index 0000000..0a5ef7a --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_imageiambindings.yaml @@ -0,0 +1,384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: imageiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ImageIAMBinding + listKind: ImageIAMBindingList + plural: imageiambindings + singular: imageiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ImageIAMBinding is the Schema for the ImageIAMBindings API. Allows + management of a single IAM binding for an image. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
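As a usage sketch for the GpuCluster schema above, the manifest below creates a cluster and resolves folderId through folderIdRef instead of hard-coding it; the object names, zone, and label are hypothetical placeholders.

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: GpuCluster
metadata:
  name: example-gpu-cluster
spec:
  forProvider:
    zone: ru-central1-a               # hypothetical availability zone
    interconnectType: infiniband      # per the schema, the only type available at the moment
    labels:
      env: dev                        # hypothetical label
    folderIdRef:
      name: example-folder            # hypothetical Folder managed resource to resolve folderId from

Setting managementPolicies: ["Observe"] instead of the default ["*"] would, per the field description above, let Crossplane observe an existing cluster without creating, updating, or deleting it.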
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageIAMBindingSpec defines the desired state of ImageIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + imageId: + description: ID of the image to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_image_iam_binding + can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + imageId: + description: ID of the image to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_image_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.imageId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.imageId) + || (has(self.initProvider) && has(self.initProvider.imageId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ImageIAMBindingStatus defines the observed state of ImageIAMBinding. + properties: + atProvider: + properties: + id: + type: string + imageId: + description: ID of the image to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_image_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_images.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_images.yaml new file mode 100644 index 0000000..9ab0952 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_images.yaml @@ -0,0 +1,715 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: images.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Image + listKind: ImageList + plural: images + singular: image + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Image is the Schema for the Images API. Creates a VM image for + the Yandex Compute service from an existing tarball. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageSpec defines the desired state of Image + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. 
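A minimal ImageIAMBinding sketch follows; it also sets deletionPolicy: Orphan, which per the schema above leaves the external binding in place when the Kubernetes object is deleted. The image ID, role, and member values are hypothetical placeholders.

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: ImageIAMBinding
metadata:
  name: image-users
spec:
  deletionPolicy: Orphan              # keep the external binding if this object is deleted
  forProvider:
    imageId: fd8abcdefexample         # hypothetical image ID
    role: compute.images.user         # hypothetical role; only one binding per role
    members:
      - serviceAccount:example-sa-id  # hypothetical identity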
Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of the image. Provide this + property when you create a resource. + type: string + family: + description: The name of the image family to which this image + belongs. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hardwareGeneration: + items: + properties: + generation2Features: + items: + type: object + type: array + legacyFeatures: + items: + properties: + pciTopology: + type: string + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the image. + type: object + x-kubernetes-map-type: granular + minDiskSize: + description: Minimum size in GB of the disk that will be created + from this image. + type: number + name: + description: Name of the image.
+ type: string + osType: + description: 'Operating system type that is contained in the image. + Possible values: "LINUX", "WINDOWS".' + type: string + pooled: + description: Optimize the image to create a disk. + type: boolean + productIds: + description: License IDs that indicate which licenses are attached + to this image. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDisk: + description: The ID of a disk to use as the source of the image. + Changing this ID forces a new resource to be created. + type: string + sourceFamily: + description: The name of the family to use as the source of the + new image. The ID of the latest image is taken from the "standard-images" + folder. Changing the family forces a new resource to be created. + type: string + sourceImage: + description: The ID of an existing image to use as the source + of the image. Changing this ID forces a new resource to be created. + type: string + sourceSnapshot: + description: The ID of a snapshot to use as the source of the + image. Changing this ID forces a new resource to be created. + type: string + sourceUrl: + description: The URL to use as the source of the image. Changing + this URL forces a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of the image. Provide this + property when you create a resource. + type: string + family: + description: The name of the image family to which this image + belongs. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId.
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hardwareGeneration: + items: + properties: + generation2Features: + items: + type: object + type: array + legacyFeatures: + items: + properties: + pciTopology: + type: string + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the image. + type: object + x-kubernetes-map-type: granular + minDiskSize: + description: Minimum size in GB of the disk that will be created + from this image. + type: number + name: + description: Name of the image. + type: string + osType: + description: 'Operating system type that is contained in the image. + Possible values: "LINUX", "WINDOWS".' + type: string + pooled: + description: Optimize the image to create a disk. + type: boolean + productIds: + description: License IDs that indicate which licenses are attached + to this image. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDisk: + description: The ID of a disk to use as the source of the image. + Changing this ID forces a new resource to be created. + type: string + sourceFamily: + description: The name of the family to use as the source of the + new image. The ID of the latest image is taken from the "standard-images" + folder. Changing the family forces a new resource to be created. + type: string + sourceImage: + description: The ID of an existing image to use as the source + of the image. Changing this ID forces a new resource to be created. + type: string + sourceSnapshot: + description: The ID of a snapshot to use as the source of the + image. Changing this ID forces a new resource to be created. + type: string + sourceUrl: + description: The URL to use as the source of the image. Changing + this URL forces a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ImageStatus defines the observed state of Image. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the image. + type: string + description: + description: An optional description of the image. Provide this + property when you create a resource. + type: string + family: + description: The name of the image family to which this image + belongs. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + hardwareGeneration: + items: + properties: + generation2Features: + items: + type: object + type: array + legacyFeatures: + items: + properties: + pciTopology: + type: string + type: object + type: array + type: object + type: array + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the image. + type: object + x-kubernetes-map-type: granular + minDiskSize: + description: Minimum size in GB of the disk that will be created + from this image. + type: number + name: + description: Name of the image. + type: string + osType: + description: 'Operating system type that is contained in the image. + Possible values: "LINUX", "WINDOWS".' + type: string + pooled: + description: Optimize the image to create a disk. + type: boolean + productIds: + description: License IDs that indicate which licenses are attached + to this image. + items: + type: string + type: array + x-kubernetes-list-type: set + size: + description: The size of the image, specified in GB. + type: number + sourceDisk: + description: The ID of a disk to use as the source of the image. + Changing this ID forces a new resource to be created. + type: string + sourceFamily: + description: The name of the family to use as the source of the + new image. The ID of the latest image is taken from the "standard-images" + folder. Changing the family forces a new resource to be created. + type: string + sourceImage: + description: The ID of an existing image to use as the source + of the image. Changing this ID forces a new resource to be created.
+ type: string + sourceSnapshot: + description: The ID of a snapshot to use as the source of the + image. Changing this ID forces a new resource to be created. + type: string + sourceUrl: + description: The URL to use as the source of the image. Changing + this URL forces a new resource to be created. + type: string + status: + description: The status of the image. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_instancegroups.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instancegroups.yaml new file mode 100644 index 0000000..b420d33 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instancegroups.yaml @@ -0,0 +1,3599 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: instancegroups.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: InstanceGroup + listKind: InstanceGroupList + plural: instancegroups + singular: instancegroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: InstanceGroup is the Schema for the InstanceGroups API. Manages + an Instance group resource. 
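To tie the Image schema above together, here is an illustrative manifest that builds an image from a URL and selects the folder by label. The URL, label, and size are hypothetical placeholders, and which single source* field to set depends on where the image comes from (disk, family, image, snapshot, or URL).

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Image
metadata:
  name: example-image
spec:
  forProvider:
    name: example-image               # name of the image
    osType: LINUX                     # per the schema: "LINUX" or "WINDOWS"
    minDiskSize: 10                   # GB; minimum size of disks created from this image
    pooled: true                      # optimize the image for disk creation
    sourceUrl: https://example.com/images/example.qcow2   # hypothetical source URL
    folderIdSelector:
      matchLabels:
        purpose: images               # hypothetical label on a Folder managed resource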
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceGroupSpec defines the desired state of InstanceGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocationPolicy: + description: The allocation policy of the instance group by zone + and region. The structure is documented below. + items: + properties: + instanceTagsPool: + description: Array of availability zone IDs with a list of + instance tags. + items: + properties: + tags: + description: List of tags for instances in the zone. + items: + type: string + type: array + zone: + description: Availability zone. + type: string + type: object + type: array + zones: + description: A list of availability zones. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + applicationLoadBalancer: + description: Application Load balancing (L7) specifications. The + structure is documented below. + items: + properties: + ignoreHealthChecks: + description: Do not wait for load balancer health checks. + type: boolean + maxOpeningTrafficDuration: + description: Timeout for waiting for the VM to be checked + by the load balancer. If the timeout is exceeded, the + VM will be turned off based on the deployment policy. + Specified in seconds. + type: number + targetGroupDescription: + description: A description of the target group. + type: string + targetGroupLabels: + additionalProperties: + type: string + description: A set of key/value label pairs. + type: object + x-kubernetes-map-type: granular + targetGroupName: + description: The name of the target group. + type: string + type: object + type: array + deletionProtection: + description: Flag that protects the instance group from accidental + deletion. + type: boolean + deployPolicy: + description: The deployment policy of the instance group. The + structure is documented below. + items: + properties: + maxCreating: + description: The maximum number of instances that can be + created at the same time. + type: number + maxDeleting: + description: The maximum number of instances that can be + deleted at the same time.
+ type: number + maxExpansion: + description: The maximum number of instances that can be + temporarily allocated above the group's target size during + the update process. + type: number + maxUnavailable: + description: The maximum number of running instances that + can be taken offline (stopped or deleted) at the same + time during the update process. + type: number + startupDuration: + description: The amount of time in seconds to allow for + an instance to start. Instance will be considered up and + running (and start receiving traffic) only after the startup_duration + has elapsed and all health checks are passed. + type: number + strategy: + description: Affects the lifecycle of the instance during + deployment. If set to proactive (default), Instance Groups + can forcefully stop a running instance. If opportunistic, + Instance Groups does not stop a running instance. Instead, + it will wait until the instance stops itself or becomes + unhealthy. + type: string + type: object + type: array + description: + description: A description of the instance group. + type: string + folderId: + description: The ID of the folder that the resources belong to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + healthCheck: + description: Health check specifications. The structure is documented + below. 
+ items: + properties: + healthyThreshold: + description: The number of successful health checks before + the managed instance is declared healthy. + type: number + httpOptions: + description: HTTP check options. The structure is documented + below. + items: + properties: + path: + description: The URL path used for health check requests. + type: string + port: + description: The port used for TCP health checks. + type: number + type: object + type: array + interval: + description: The interval to wait between health checks + in seconds. + type: number + tcpOptions: + description: TCP check options. The structure is documented + below. + items: + properties: + port: + description: The port used for TCP health checks. + type: number + type: object + type: array + timeout: + description: The length of time to wait for a response before + the health check times out in seconds. + type: number + unhealthyThreshold: + description: The number of failed health checks before the + managed instance is declared unhealthy. + type: number + type: object + type: array + instanceTemplate: + description: The template for creating new instances. The structure + is documented below. + items: + properties: + bootDisk: + description: Boot disk specifications for the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + diskId: + description: ID of the existing disk. To set use variables. + type: string + initializeParams: + description: Parameters used for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + description: + description: A description of the instance. + type: string + imageId: + description: The disk image to initialize this + disk from. + type: string + imageIdRef: + description: Reference to an Image to populate + imageId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageIdSelector: + description: Selector for an Image to populate + imageId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + size: + description: The size of the disk in GB. + type: number + snapshotId: + description: The snapshot to initialize this + disk from. + type: string + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + name: + description: When set, can later be used to change + the DiskSpec of the actual disk. + type: string + type: object + type: array + description: + description: A description of the instance. + type: string + filesystem: + description: List of filesystems to attach to the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + filesystemId: + description: ID of the filesystem that should be attached. + type: string + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + hostname: + description: |- + Hostname template for the instance. + This field is used to generate the FQDN value of the instance. + The hostname must be unique within the network and region. + If not specified, the hostname will be equal to the id of the instance + and FQDN will be .auto.internal. Otherwise FQDN will be ..internal. + In order to be unique it must contain at least one of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, the name value will be used + It may also contain other placeholders; see the metadata doc for the full list. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + the instance. + type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: A set of metadata key/value pairs to make available + from within the instance. + type: object + x-kubernetes-map-type: granular + metadataOptions: + description: Options that allow the user to configure access to managed + instance metadata + items: + properties: + awsV1HttpEndpoint: + type: number + awsV1HttpToken: + type: number + gceHttpEndpoint: + type: number + gceHttpToken: + type: number + type: object + type: array + name: + description: |- + Name template of the instance. + In order to be unique it must contain at least one of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, default is used: {instance_group.id}-{instance.short_id} + It may also contain other placeholders; see the metadata doc for the full list. + type: string + networkInterface: + description: Network specifications for the instance.
This + can be used multiple times for adding multiple interfaces. + The structure is documented below. + items: + properties: + dnsRecord: + description: List of dns records. The structure is + documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, the private + zone is used). + type: string + fqdn: + description: DNS record fqdn (must have a dot + at the end). + type: string + ptr: + description: When set to true, also create a PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + ipAddress: + description: Manually set static IP address. + type: string + ipv4: + description: True if an IPv4 address is allocated for the + network interface. + type: boolean + ipv6: + type: boolean + ipv6Address: + description: Manually set static IPv6 address. + type: string + ipv6DnsRecord: + description: List of ipv6 dns records. The structure + is documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, the private + zone is used). + type: string + fqdn: + description: DNS record fqdn (must have a dot + at the end). + type: string + ptr: + description: When set to true, also create a PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + nat: + description: Flag for using NAT. + type: boolean + natDnsRecord: + description: List of nat dns records. The structure + is documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, the private + zone is used). + type: string + fqdn: + description: DNS record fqdn (must have a dot + at the end). + type: string + ptr: + description: When set to true, also create a PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + natIpAddress: + description: A public address that can be used to + access the internet over NAT. Use variables to set it. + type: string + networkId: + description: The ID of the network. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate + networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate + networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to + populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The ID of the subnets to attach this + interface to. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in vpc to populate + subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc + to populate subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + networkSettings: + description: Network acceleration type for instance. The + structure is documented below. + items: + properties: + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + placementGroupId: + description: Specifies the id of the Placement Group + to assign to the instances. + type: string + type: object + type: array + platformId: + description: The ID of the hardware platform configuration + for the instance. The default is 'standard-v1'. + type: string + resources: + description: Compute resource specifications for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline core + performance as a percent. + type: number + cores: + description: The number of CPU cores for the instance. + type: number + gpus: + type: number + memory: + description: The memory size in GB. + type: number + type: object + type: array + schedulingPolicy: + description: The scheduling policy configuration. The structure + is documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. + Defaults to false. + type: boolean + type: object + type: array + secondaryDisk: + description: A list of disks to attach to the instance. 
+ The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + diskId: + description: ID of the existing disk. To set use variables. + type: string + initializeParams: + description: Parameters used for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + description: + description: A description of the instance. + type: string + imageId: + description: The disk image to initialize this + disk from. + type: string + size: + description: The size of the disk in GB. + type: number + snapshotId: + description: The snapshot to initialize this + disk from. + type: string + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + name: + description: When set, can later be used to change + the DiskSpec of the actual disk. + type: string + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for + this instance. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the instance + group. + type: object + x-kubernetes-map-type: granular + loadBalancer: + description: Load balancing specifications. The structure is documented + below. + items: + properties: + ignoreHealthChecks: + description: Do not wait for load balancer health checks. + type: boolean + maxOpeningTrafficDuration: + description: Timeout for waiting for the VM to be checked + by the load balancer. If the timeout is exceeded, the + VM will be turned off based on the deployment policy. + Specified in seconds. + type: number + targetGroupDescription: + description: A description of the target group. + type: string + targetGroupLabels: + additionalProperties: + type: string + description: A set of key/value label pairs. + type: object + x-kubernetes-map-type: granular + targetGroupName: + description: The name of the target group. + type: string + type: object + type: array + maxCheckingHealthDuration: + description: Timeout for waiting for the VM to become healthy. + If the timeout is exceeded, the VM will be turned off based + on the deployment policy. Specified in seconds. + type: number + name: + description: The name of the instance group. + type: string + scalePolicy: + description: The scaling policy of the instance group. The structure + is documented below. + items: + properties: + autoScale: + description: The auto scaling policy of the instance group. + The structure is documented below. + items: + properties: + autoScaleType: + description: Autoscale type; can be ZONAL or REGIONAL. + By default, the ZONAL type is used. + type: string + cpuUtilizationTarget: + description: Target CPU load level. + type: number + customRule: + description: A list of custom rules. The structure + is documented below. + items: + properties: + folderId: + description: The ID of the folder that the resources + belong to. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs + to assign to the instance group. + type: object + x-kubernetes-map-type: granular + metricName: + description: The name of the metric. + type: string + metricType: + description: Metric type, GAUGE or COUNTER.
+ type: string + ruleType: + description: 'Rule type: UTILIZATION - This + type means that the metric applies to one + instance. First, Instance Groups calculates + the average metric value for each instance, + then averages the values for instances in + one availability zone. This type of metric + must have the instance_id label. WORKLOAD + - This type means that the metric applies + to instances in one availability zone. This + type of metric must have the zone_id label.' + type: string + service: + description: Service of custom metric in Yandex + Monitoring that should be used for scaling. + type: string + target: + description: Target metric value level. + type: number + type: object + type: array + initialSize: + description: The initial number of instances in the + instance group. + type: number + maxSize: + description: The maximum number of virtual machines + in the group. + type: number + measurementDuration: + description: The amount of time, in seconds, that + metrics are averaged for. If the average value at + the end of the interval is higher than the cpu_utilization_target, + the instance group will increase the number of virtual + machines in the group. + type: number + minZoneSize: + description: The minimum number of virtual machines + in a single availability zone. + type: number + stabilizationDuration: + description: The minimum time interval, in seconds, + to monitor the load before an instance group can + reduce the number of virtual machines in the group. + During this time, the group will not decrease even + if the average load falls below the value of cpu_utilization_target. + type: number + warmupDuration: + description: The warm-up time of the virtual machine, + in seconds. During this time, traffic is fed to + the virtual machine, but load metrics are not taken + into account. + type: number + type: object + type: array + fixedScale: + description: The fixed scaling policy of the instance group. + The structure is documented below. + items: + properties: + size: + description: The number of instances in the instance group. + type: number + type: object + type: array + testAutoScale: + description: The test auto scaling policy of the instance + group. Use it to test how the auto scale works. The structure + is documented below. + items: + properties: + autoScaleType: + description: Autoscale type; can be ZONAL or REGIONAL. + By default, the ZONAL type is used. + type: string + cpuUtilizationTarget: + description: Target CPU load level. + type: number + customRule: + description: A list of custom rules. The structure + is documented below. + items: + properties: + folderId: + description: The ID of the folder that the resources + belong to. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs + to assign to the instance group. + type: object + x-kubernetes-map-type: granular + metricName: + description: The name of the metric. + type: string + metricType: + description: Metric type, GAUGE or COUNTER. + type: string + ruleType: + description: 'Rule type: UTILIZATION - This + type means that the metric applies to one + instance. First, Instance Groups calculates + the average metric value for each instance, + then averages the values for instances in + one availability zone. This type of metric + must have the instance_id label. WORKLOAD + - This type means that the metric applies + to instances in one availability zone. This + type of metric must have the zone_id label.'
+ type: string + service: + description: Service of custom metric in Yandex + Monitoring that should be used for scaling. + type: string + target: + description: Target metric value level. + type: number + type: object + type: array + initialSize: + description: The initial number of instances in the + instance group. + type: number + maxSize: + description: The maximum number of virtual machines + in the group. + type: number + measurementDuration: + description: The amount of time, in seconds, that + metrics are averaged for. If the average value at + the end of the interval is higher than the cpu_utilization_target, + the instance group will increase the number of virtual + machines in the group. + type: number + minZoneSize: + description: The minimum number of virtual machines + in a single availability zone. + type: number + stabilizationDuration: + description: The minimum time interval, in seconds, + to monitor the load before an instance group can + reduce the number of virtual machines in the group. + During this time, the group will not decrease even + if the average load falls below the value of cpu_utilization_target. + type: number + warmupDuration: + description: The warm-up time of the virtual machine, + in seconds. During this time, traffic is fed to + the virtual machine, but load metrics are not taken + into account. + type: number + type: object + type: array + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for this + instance group. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + variables: + additionalProperties: + type: string + description: A set of key/value variable pairs to assign to the + instance group. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + allocationPolicy: + description: The allocation policy of the instance group by zone + and region. The structure is documented below. + items: + properties: + instanceTagsPool: + description: Array of availability zone IDs with a list of + instance tags. + items: + properties: + tags: + description: List of tags for instances in the zone. + items: + type: string + type: array + zone: + description: Availability zone. + type: string + type: object + type: array + zones: + description: A list of availability zones. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + applicationLoadBalancer: + description: Application Load balancing (L7) specifications. The + structure is documented below. + items: + properties: + ignoreHealthChecks: + description: Do not wait for load balancer health checks. + type: boolean + maxOpeningTrafficDuration: + description: Timeout for waiting for the VM to be checked + by the load balancer. If the timeout is exceeded, the + VM will be turned off based on the deployment policy. + Specified in seconds. + type: number + targetGroupDescription: + description: A description of the target group. + type: string + targetGroupLabels: + additionalProperties: + type: string + description: A set of key/value label pairs. + type: object + x-kubernetes-map-type: granular + targetGroupName: + description: The name of the target group. + type: string + type: object + type: array + deletionProtection: + description: Flag that protects the instance group from accidental + deletion. + type: boolean + deployPolicy: + description: The deployment policy of the instance group. The + structure is documented below. + items: + properties: + maxCreating: + description: The maximum number of instances that can be + created at the same time. + type: number + maxDeleting: + description: The maximum number of instances that can be + deleted at the same time. + type: number + maxExpansion: + description: The maximum number of instances that can be + temporarily allocated above the group's target size during + the update process. + type: number + maxUnavailable: + description: The maximum number of running instances that + can be taken offline (stopped or deleted) at the same + time during the update process. + type: number + startupDuration: + description: The amount of time in seconds to allow for + an instance to start.
Instance will be considered up and + running (and start receiving traffic) only after the startup_duration + has elapsed and all health checks are passed. + type: number + strategy: + description: Affects the lifecycle of the instance during + deployment. If set to proactive (default), Instance Groups + can forcefully stop a running instance. If opportunistic, + Instance Groups does not stop a running instance. Instead, + it will wait until the instance stops itself or becomes + unhealthy. + type: string + type: object + type: array + description: + description: A description of the instance group. + type: string + folderId: + description: The ID of the folder that the resources belong to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + healthCheck: + description: Health check specifications. The structure is documented + below. + items: + properties: + healthyThreshold: + description: The number of successful health checks before + the managed instance is declared healthy. + type: number + httpOptions: + description: HTTP check options. The structure is documented + below. + items: + properties: + path: + description: The URL path used for health check requests. + type: string + port: + description: The port used for TCP health checks. + type: number + type: object + type: array + interval: + description: The interval to wait between health checks + in seconds. 
+ type: number + tcpOptions: + description: TCP check options. The structure is documented + below. + items: + properties: + port: + description: The port used for TCP health checks. + type: number + type: object + type: array + timeout: + description: The length of time to wait for a response before + the health check times out in seconds. + type: number + unhealthyThreshold: + description: The number of failed health checks before the + managed instance is declared unhealthy. + type: number + type: object + type: array + instanceTemplate: + description: The template for creating new instances. The structure + is documented below. + items: + properties: + bootDisk: + description: Boot disk specifications for the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + diskId: + description: ID of the existing disk. To set use variables. + type: string + initializeParams: + description: Parameters used for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + description: + description: A description of the instance. + type: string + imageId: + description: The disk image to initialize this + disk from. + type: string + imageIdRef: + description: Reference to an Image to populate + imageId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageIdSelector: + description: Selector for an Image to populate + imageId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + size: + description: The size of the disk in GB.
+ type: number + snapshotId: + description: The snapshot to initialize this + disk from. + type: string + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + name: + description: When set, can later be used to change + the DiskSpec of the actual disk. + type: string + type: object + type: array + description: + description: A description of the instance. + type: string + filesystem: + description: List of filesystems to attach to the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + filesystemId: + description: ID of the filesystem that should be attached. + type: string + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + hostname: + description: |- + Hostname template for the instance. + This field is used to generate the FQDN value of the instance. + The hostname must be unique within the network and region. + If not specified, the hostname will be equal to the id of the instance + and FQDN will be .auto.internal. Otherwise FQDN will be ..internal. + In order to be unique it must contain at least one of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, the name value will be used + It may also contain other placeholders; see the metadata doc for the full list. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + the instance. + type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: A set of metadata key/value pairs to make available + from within the instance. + type: object + x-kubernetes-map-type: granular + metadataOptions: + description: Options that allow the user to configure access to managed + instance metadata + items: + properties: + awsV1HttpEndpoint: + type: number + awsV1HttpToken: + type: number + gceHttpEndpoint: + type: number + gceHttpToken: + type: number + type: object + type: array + name: + description: |- + Name template of the instance. + In order to be unique it must contain at least one of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, default is used: {instance_group.id}-{instance.short_id} + It may also contain other placeholders; see the metadata doc for the full list. + type: string + networkInterface: + description: Network specifications for the instance. This + can be used multiple times for adding multiple interfaces. + The structure is documented below. + items: + properties: + dnsRecord: + description: List of dns records. The structure is + documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, the private + zone is used). + type: string + fqdn: + description: DNS record fqdn (must have a dot + at the end). + type: string + ptr: + description: When set to true, also create a PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL.
+ type: number + type: object + type: array + ipAddress: + description: Manually set static IP address. + type: string + ipv4: + description: True if an IPv4 address is allocated for the + network interface. + type: boolean + ipv6: + type: boolean + ipv6Address: + description: Manually set static IPv6 address. + type: string + ipv6DnsRecord: + description: List of ipv6 dns records. The structure + is documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, the private + zone is used). + type: string + fqdn: + description: DNS record fqdn (must have a dot + at the end). + type: string + ptr: + description: When set to true, also create a PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + nat: + description: Flag for using NAT. + type: boolean + natDnsRecord: + description: List of nat dns records. The structure + is documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, the private + zone is used). + type: string + fqdn: + description: DNS record fqdn (must have a dot + at the end). + type: string + ptr: + description: When set to true, also create a PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + natIpAddress: + description: A public address that can be used to + access the internet over NAT. Use variables to set it. + type: string + networkId: + description: The ID of the network. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate + networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate + networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present.
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to + populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The ID of the subnets to attach this + interface to. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in vpc to populate + subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc + to populate subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + networkSettings: + description: Network acceleration type for instance. The + structure is documented below. + items: + properties: + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + placementGroupId: + description: Specifies the id of the Placement Group + to assign to the instances. + type: string + type: object + type: array + platformId: + description: The ID of the hardware platform configuration + for the instance. The default is 'standard-v1'. + type: string + resources: + description: Compute resource specifications for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline core + performance as a percent. + type: number + cores: + description: The number of CPU cores for the instance. + type: number + gpus: + type: number + memory: + description: The memory size in GB. + type: number + type: object + type: array + schedulingPolicy: + description: The scheduling policy configuration. The structure + is documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. + Defaults to false. + type: boolean + type: object + type: array + secondaryDisk: + description: A list of disks to attach to the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + diskId: + description: ID of the existing disk. To set use variables. + type: string + initializeParams: + description: Parameters used for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + description: + description: A description of the instance. 
+ type: string + imageId: + description: The disk image to initialize this + disk from. + type: string + size: + description: The size of the disk in GB. + type: number + snapshotId: + description: The snapshot to initialize this + disk from. + type: string + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + name: + description: When set can be later used to change + DiskSpec of actual disk. + type: string + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for + this instance. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the instance + group. + type: object + x-kubernetes-map-type: granular + loadBalancer: + description: Load balancing specifications. The structure is documented + below. + items: + properties: + ignoreHealthChecks: + description: Do not wait load balancer health checks. + type: boolean + maxOpeningTrafficDuration: + description: Timeout for waiting for the VM to be checked + by the load balancer. If the timeout is exceeded, the + VM will be turned off based on the deployment policy. + Specified in seconds. + type: number + targetGroupDescription: + description: A description of the target group. + type: string + targetGroupLabels: + additionalProperties: + type: string + description: A set of key/value label pairs. + type: object + x-kubernetes-map-type: granular + targetGroupName: + description: The name of the target group. + type: string + type: object + type: array + maxCheckingHealthDuration: + description: Timeout for waiting for the VM to become healthy. + If the timeout is exceeded, the VM will be turned off based + on the deployment policy. Specified in seconds. + type: number + name: + description: The name of the instance group. + type: string + scalePolicy: + description: The scaling policy of the instance group. The structure + is documented below. + items: + properties: + autoScale: + description: The auto scaling policy of the instance group. + The structure is documented below. + items: + properties: + autoScaleType: + description: . Autoscale type, can be ZONAL or REGIONAL. + By default ZONAL type is used. + type: string + cpuUtilizationTarget: + description: Target CPU load level. + type: number + customRule: + description: A list of custom rules. The structure + is documented below. + items: + properties: + folderId: + description: The ID of the folder that the resources + belong to. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs + to assign to the instance group. + type: object + x-kubernetes-map-type: granular + metricName: + description: The name of metric. + type: string + metricType: + description: Metric type, GAUGE or COUNTER. + type: string + ruleType: + description: 'Rule type: UTILIZATION - This + type means that the metric applies to one + instance. First, Instance Groups calculates + the average metric value for each instance, + then averages the values for instances in + one availability zone. This type of metric + must have the instance_id label. WORKLOAD + - This type means that the metric applies + to instances in one availability zone. This + type of metric must have the zone_id label.' 
+ type: string + service: + description: Service of custom metric in Yandex + Monitoring that should be used for scaling. + type: string + target: + description: Target metric value level. + type: number + type: object + type: array + initialSize: + description: The initial number of instances in the + instance group. + type: number + maxSize: + description: The maximum number of virtual machines + in the group. + type: number + measurementDuration: + description: The amount of time, in seconds, that + metrics are averaged for. If the average value at + the end of the interval is higher than the cpu_utilization_target, + the instance group will increase the number of virtual + machines in the group. + type: number + minZoneSize: + description: The minimum number of virtual machines + in a single availability zone. + type: number + stabilizationDuration: + description: The minimum time interval, in seconds, + to monitor the load before an instance group can + reduce the number of virtual machines in the group. + During this time, the group will not decrease even + if the average load falls below the value of cpu_utilization_target. + type: number + warmupDuration: + description: The warm-up time of the virtual machine, + in seconds. During this time, traffic is fed to + the virtual machine, but load metrics are not taken + into account. + type: number + type: object + type: array + fixedScale: + description: The fixed scaling policy of the instance group. + The structure is documented below. + items: + properties: + size: + description: The size of the disk in GB. + type: number + type: object + type: array + testAutoScale: + description: The test auto scaling policy of the instance + group. Use it to test how the auto scale works. The structure + is documented below. + items: + properties: + autoScaleType: + description: . Autoscale type, can be ZONAL or REGIONAL. + By default ZONAL type is used. + type: string + cpuUtilizationTarget: + description: Target CPU load level. + type: number + customRule: + description: A list of custom rules. The structure + is documented below. + items: + properties: + folderId: + description: The ID of the folder that the resources + belong to. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs + to assign to the instance group. + type: object + x-kubernetes-map-type: granular + metricName: + description: The name of metric. + type: string + metricType: + description: Metric type, GAUGE or COUNTER. + type: string + ruleType: + description: 'Rule type: UTILIZATION - This + type means that the metric applies to one + instance. First, Instance Groups calculates + the average metric value for each instance, + then averages the values for instances in + one availability zone. This type of metric + must have the instance_id label. WORKLOAD + - This type means that the metric applies + to instances in one availability zone. This + type of metric must have the zone_id label.' + type: string + service: + description: Service of custom metric in Yandex + Monitoring that should be used for scaling. + type: string + target: + description: Target metric value level. + type: number + type: object + type: array + initialSize: + description: The initial number of instances in the + instance group. + type: number + maxSize: + description: The maximum number of virtual machines + in the group. + type: number + measurementDuration: + description: The amount of time, in seconds, that + metrics are averaged for. 
If the average value at + the end of the interval is higher than the cpu_utilization_target, + the instance group will increase the number of virtual + machines in the group. + type: number + minZoneSize: + description: The minimum number of virtual machines + in a single availability zone. + type: number + stabilizationDuration: + description: The minimum time interval, in seconds, + to monitor the load before an instance group can + reduce the number of virtual machines in the group. + During this time, the group will not decrease even + if the average load falls below the value of cpu_utilization_target. + type: number + warmupDuration: + description: The warm-up time of the virtual machine, + in seconds. During this time, traffic is fed to + the virtual machine, but load metrics are not taken + into account. + type: number + type: object + type: array + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for this + instance group. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + variables: + additionalProperties: + type: string + description: A set of key/value variables pairs to assign to the + instance group. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
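# --- Illustrative usage (hypothetical) ----------------------------------------
# A sketch of the Crossplane machinery fields above: import an existing group
# without letting Crossplane mutate it, and point at a named ProviderConfig.
# The external name and config name are invented.
---
apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: InstanceGroup
metadata:
  name: imported-ig
  annotations:
    crossplane.io/external-name: cl1example   # hypothetical existing group ID
spec:
  managementPolicies: ["Observe"]   # observe-only: no create/update/delete
  providerConfigRef:
    name: yc-prod                   # hypothetical ProviderConfig name
  forProvider: {}                   # permitted: the required-field rules below
                                    # only apply when Create/Update is allowed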
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.allocationPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.allocationPolicy) + || (has(self.initProvider) && has(self.initProvider.allocationPolicy))' + - message: spec.forProvider.deployPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.deployPolicy) + || (has(self.initProvider) && has(self.initProvider.deployPolicy))' + - message: spec.forProvider.instanceTemplate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceTemplate) + || (has(self.initProvider) && has(self.initProvider.instanceTemplate))' + - message: spec.forProvider.scalePolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scalePolicy) + || (has(self.initProvider) && has(self.initProvider.scalePolicy))' + status: + description: InstanceGroupStatus defines the observed state of InstanceGroup. + properties: + atProvider: + properties: + allocationPolicy: + description: The allocation policy of the instance group by zone + and region. The structure is documented below. + items: + properties: + instanceTagsPool: + description: Array of availability zone IDs with list of + instance tags. 
+ items: + properties: + tags: + description: List of tags for instances in zone. + items: + type: string + type: array + zone: + description: Availability zone. + type: string + type: object + type: array + zones: + description: A list of availability zones. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + applicationLoadBalancer: + description: Application Load balancing (L7) specifications. The + structure is documented below. + items: + properties: + ignoreHealthChecks: + description: Do not wait load balancer health checks. + type: boolean + maxOpeningTrafficDuration: + description: Timeout for waiting for the VM to be checked + by the load balancer. If the timeout is exceeded, the + VM will be turned off based on the deployment policy. + Specified in seconds. + type: number + statusMessage: + description: The status message of the target group. + type: string + targetGroupDescription: + description: A description of the target group. + type: string + targetGroupId: + description: The ID of the target group. + type: string + targetGroupLabels: + additionalProperties: + type: string + description: A set of key/value label pairs. + type: object + x-kubernetes-map-type: granular + targetGroupName: + description: The name of the target group. + type: string + type: object + type: array + createdAt: + description: The instance group creation timestamp. + type: string + deletionProtection: + description: Flag that protects the instance group from accidental + deletion. + type: boolean + deployPolicy: + description: The deployment policy of the instance group. The + structure is documented below. + items: + properties: + maxCreating: + description: The maximum number of instances that can be + created at the same time. + type: number + maxDeleting: + description: The maximum number of instances that can be + deleted at the same time. + type: number + maxExpansion: + description: The maximum number of instances that can be + temporarily allocated above the group's target size during + the update process. + type: number + maxUnavailable: + description: The maximum number of running instances that + can be taken offline (stopped or deleted) at the same + time during the update process. + type: number + startupDuration: + description: The amount of time in seconds to allow for + an instance to start. Instance will be considered up and + running (and start receiving traffic) only after the startup_duration + has elapsed and all health checks are passed. + type: number + strategy: + description: Affects the lifecycle of the instance during + deployment. If set to proactive (default), Instance Groups + can forcefully stop a running instance. If opportunistic, + Instance Groups does not stop a running instance. Instead, + it will wait until the instance stops itself or becomes + unhealthy. + type: string + type: object + type: array + description: + description: A description of the instance group. + type: string + folderId: + description: The ID of the folder that the resources belong to. + type: string + healthCheck: + description: Health check specifications. The structure is documented + below. + items: + properties: + healthyThreshold: + description: The number of successful health checks before + the managed instance is declared healthy. + type: number + httpOptions: + description: HTTP check options. The structure is documented + below. + items: + properties: + path: + description: The URL path used for health check requests. 
+ type: string + port: + description: The port used for TCP health checks. + type: number + type: object + type: array + interval: + description: The interval to wait between health checks + in seconds. + type: number + tcpOptions: + description: TCP check options. The structure is documented + below. + items: + properties: + port: + description: The port used for TCP health checks. + type: number + type: object + type: array + timeout: + description: The length of time to wait for a response before + the health check times out in seconds. + type: number + unhealthyThreshold: + description: The number of failed health checks before the + managed instance is declared unhealthy. + type: number + type: object + type: array + id: + description: The ID of the instance group. + type: string + instanceTemplate: + description: The template for creating new instances. The structure + is documented below. + items: + properties: + bootDisk: + description: Boot disk specifications for the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + diskId: + description: ID of the existing disk. To set use variables. + type: string + initializeParams: + description: Parameters used for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + description: + description: A description of the instance. + type: string + imageId: + description: The disk image to initialize this + disk from. + type: string + size: + description: The size of the disk in GB. + type: number + snapshotId: + description: The snapshot to initialize this + disk from. + type: string + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + name: + description: When set can be later used to change + DiskSpec of actual disk. + type: string + type: object + type: array + description: + description: A description of the instance. + type: string + filesystem: + description: List of filesystems to attach to the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + filesystemId: + description: ID of the filesystem that should be attached. + type: string + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + hostname: + description: |- + Hostname template for the instance. + This field is used to generate the FQDN value of instance. + The hostname must be unique within the network and region. + If not specified, the hostname will be equal to id of the instance + and FQDN will be .auto.internal. Otherwise FQDN will be ..internal. + In order to be unique it must contain at least on of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, name value will be used + It may also contain another placeholders, see metadata doc for full list. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + the instance. 
+ type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: A set of metadata key/value pairs to make available + from within the instance. + type: object + x-kubernetes-map-type: granular + metadataOptions: + description: Options allow user to configure access to managed + instances metadata + items: + properties: + awsV1HttpEndpoint: + type: number + awsV1HttpToken: + type: number + gceHttpEndpoint: + type: number + gceHttpToken: + type: number + type: object + type: array + name: + description: |- + Name template of the instance. + In order to be unique it must contain at least one of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, default is used: {instance_group.id}-{instance.short_id} + It may also contain another placeholders, see metadata doc for full list. + type: string + networkInterface: + description: Network specifications for the instance. This + can be used multiple times for adding multiple interfaces. + The structure is documented below. + items: + properties: + dnsRecord: + description: List of dns records. The structure is + documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, private + zone used). + type: string + fqdn: + description: DNS record fqdn (must have dot + at the end). + type: string + ptr: + description: When set to true, also create PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + ipAddress: + description: Manual set static IP address. + type: string + ipv4: + description: True if IPv4 address allocated for the + network interface. + type: boolean + ipv6: + type: boolean + ipv6Address: + description: Manual set static IPv6 address. + type: string + ipv6DnsRecord: + description: List of ipv6 dns records. The structure + is documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, private + zone used). + type: string + fqdn: + description: DNS record fqdn (must have dot + at the end). + type: string + ptr: + description: When set to true, also create PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + nat: + description: Flag for using NAT. + type: boolean + natDnsRecord: + description: List of nat dns records. The structure + is documented below. + items: + properties: + dnsZoneId: + description: DNS zone id (if not set, private + zone used). + type: string + fqdn: + description: DNS record fqdn (must have dot + at the end). + type: string + ptr: + description: When set to true, also create PTR + DNS record. + type: boolean + ttl: + description: DNS record TTL. + type: number + type: object + type: array + natIpAddress: + description: A public address that can be used to + access the internet over NAT. Use variables to set. + type: string + networkId: + description: The ID of the network. + type: string + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The ID of the subnets to attach this + interface to. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + networkSettings: + description: Network acceleration type for instance. The + structure is documented below. 
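# --- Illustrative usage (hypothetical) ----------------------------------------
# A spec-side sketch of the networkInterface DNS/NAT fields mirrored above: a
# public (NAT) interface with an internal DNS record. Zone ID and FQDN are
# invented; note the schema requires the FQDN to end with a dot.
networkInterface:
  - networkId: enpexample
    subnetIds: ["e9bexample"]
    nat: true                         # allocate a public address via NAT
    dnsRecord:
      - fqdn: web-0.example.internal. # trailing dot required
        dnsZoneId: dnsexample         # omit to use the private zone
        ttl: 300
        ptr: true                     # also create the reverse (PTR) record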
+ items: + properties: + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + placementGroupId: + description: Specifies the id of the Placement Group + to assign to the instances. + type: string + type: object + type: array + platformId: + description: The ID of the hardware platform configuration + for the instance. The default is 'standard-v1'. + type: string + resources: + description: Compute resource specifications for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline core + performance as a percent. + type: number + cores: + description: The number of CPU cores for the instance. + type: number + gpus: + type: number + memory: + description: The memory size in GB. + type: number + type: object + type: array + schedulingPolicy: + description: The scheduling policy configuration. The structure + is documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. + Defaults to false. + type: boolean + type: object + type: array + secondaryDisk: + description: A list of disks to attach to the instance. + The structure is documented below. + items: + properties: + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + diskId: + description: ID of the existing disk. To set use variables. + type: string + initializeParams: + description: Parameters used for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + description: + description: A description of the instance. + type: string + imageId: + description: The disk image to initialize this + disk from. + type: string + size: + description: The size of the disk in GB. + type: number + snapshotId: + description: The snapshot to initialize this + disk from. + type: string + type: + description: Network acceleration type. By default + a network is in STANDARD mode. + type: string + type: object + type: array + mode: + description: The access mode to the disk resource. + By default a disk is attached in READ_WRITE mode. + type: string + name: + description: When set can be later used to change + DiskSpec of actual disk. + type: string + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for + this instance. + type: string + type: object + type: array + instances: + items: + properties: + fqdn: + description: The Fully Qualified Domain Name. + type: string + instanceId: + description: The ID of the instance. + type: string + instanceTag: + type: string + name: + description: The name of the managed instance. + type: string + networkInterface: + description: An array with the network interfaces attached + to the managed instance. + items: + properties: + index: + description: The index of the network interface as + generated by the server. + type: number + ipAddress: + description: The private IP address to assign to the + instance. If empty, the address is automatically + assigned from the specified subnet. + type: string + ipv4: + description: True if IPv4 address allocated for the + network interface. + type: boolean + ipv6: + type: boolean + ipv6Address: + description: Manual set static IPv6 address. 
+ type: string + macAddress: + description: The MAC address assigned to the network + interface. + type: string + nat: + description: The instance's public address for accessing + the internet over NAT. + type: boolean + natIpAddress: + description: The public IP address of the instance. + type: string + natIpVersion: + description: The IP version for the public address. + type: string + subnetId: + description: The ID of the subnet to attach this interface + to. The subnet must reside in the same zone where + this instance was created. + type: string + type: object + type: array + status: + description: The status of the instance. + type: string + statusChangedAt: + type: string + statusMessage: + description: The status message of the instance. + type: string + zoneId: + description: The ID of the availability zone where the instance + resides. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the instance + group. + type: object + x-kubernetes-map-type: granular + loadBalancer: + description: Load balancing specifications. The structure is documented + below. + items: + properties: + ignoreHealthChecks: + description: Do not wait load balancer health checks. + type: boolean + maxOpeningTrafficDuration: + description: Timeout for waiting for the VM to be checked + by the load balancer. If the timeout is exceeded, the + VM will be turned off based on the deployment policy. + Specified in seconds. + type: number + statusMessage: + description: The status message of the target group. + type: string + targetGroupDescription: + description: A description of the target group. + type: string + targetGroupId: + description: The ID of the target group. + type: string + targetGroupLabels: + additionalProperties: + type: string + description: A set of key/value label pairs. + type: object + x-kubernetes-map-type: granular + targetGroupName: + description: The name of the target group. + type: string + type: object + type: array + maxCheckingHealthDuration: + description: Timeout for waiting for the VM to become healthy. + If the timeout is exceeded, the VM will be turned off based + on the deployment policy. Specified in seconds. + type: number + name: + description: The name of the instance group. + type: string + scalePolicy: + description: The scaling policy of the instance group. The structure + is documented below. + items: + properties: + autoScale: + description: The auto scaling policy of the instance group. + The structure is documented below. + items: + properties: + autoScaleType: + description: . Autoscale type, can be ZONAL or REGIONAL. + By default ZONAL type is used. + type: string + cpuUtilizationTarget: + description: Target CPU load level. + type: number + customRule: + description: A list of custom rules. The structure + is documented below. + items: + properties: + folderId: + description: The ID of the folder that the resources + belong to. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs + to assign to the instance group. + type: object + x-kubernetes-map-type: granular + metricName: + description: The name of metric. + type: string + metricType: + description: Metric type, GAUGE or COUNTER. + type: string + ruleType: + description: 'Rule type: UTILIZATION - This + type means that the metric applies to one + instance. 
First, Instance Groups calculates + the average metric value for each instance, + then averages the values for instances in + one availability zone. This type of metric + must have the instance_id label. WORKLOAD + - This type means that the metric applies + to instances in one availability zone. This + type of metric must have the zone_id label.' + type: string + service: + description: Service of custom metric in Yandex + Monitoring that should be used for scaling. + type: string + target: + description: Target metric value level. + type: number + type: object + type: array + initialSize: + description: The initial number of instances in the + instance group. + type: number + maxSize: + description: The maximum number of virtual machines + in the group. + type: number + measurementDuration: + description: The amount of time, in seconds, that + metrics are averaged for. If the average value at + the end of the interval is higher than the cpu_utilization_target, + the instance group will increase the number of virtual + machines in the group. + type: number + minZoneSize: + description: The minimum number of virtual machines + in a single availability zone. + type: number + stabilizationDuration: + description: The minimum time interval, in seconds, + to monitor the load before an instance group can + reduce the number of virtual machines in the group. + During this time, the group will not decrease even + if the average load falls below the value of cpu_utilization_target. + type: number + warmupDuration: + description: The warm-up time of the virtual machine, + in seconds. During this time, traffic is fed to + the virtual machine, but load metrics are not taken + into account. + type: number + type: object + type: array + fixedScale: + description: The fixed scaling policy of the instance group. + The structure is documented below. + items: + properties: + size: + description: The size of the disk in GB. + type: number + type: object + type: array + testAutoScale: + description: The test auto scaling policy of the instance + group. Use it to test how the auto scale works. The structure + is documented below. + items: + properties: + autoScaleType: + description: . Autoscale type, can be ZONAL or REGIONAL. + By default ZONAL type is used. + type: string + cpuUtilizationTarget: + description: Target CPU load level. + type: number + customRule: + description: A list of custom rules. The structure + is documented below. + items: + properties: + folderId: + description: The ID of the folder that the resources + belong to. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs + to assign to the instance group. + type: object + x-kubernetes-map-type: granular + metricName: + description: The name of metric. + type: string + metricType: + description: Metric type, GAUGE or COUNTER. + type: string + ruleType: + description: 'Rule type: UTILIZATION - This + type means that the metric applies to one + instance. First, Instance Groups calculates + the average metric value for each instance, + then averages the values for instances in + one availability zone. This type of metric + must have the instance_id label. WORKLOAD + - This type means that the metric applies + to instances in one availability zone. This + type of metric must have the zone_id label.' + type: string + service: + description: Service of custom metric in Yandex + Monitoring that should be used for scaling. + type: string + target: + description: Target metric value level. 
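# --- Illustrative usage (hypothetical) ----------------------------------------
# A sketch of an autoscaling customRule as described above: a UTILIZATION rule
# averages a per-instance metric, so the metric must carry the instance_id
# label. Metric and service names are invented.
autoScale:
  - initialSize: 2
    minZoneSize: 1
    maxSize: 10
    measurementDuration: 60       # seconds of averaging per scaling decision
    stabilizationDuration: 300    # seconds before scale-in is permitted
    customRule:
      - ruleType: UTILIZATION
        metricType: GAUGE
        metricName: queue_depth   # hypothetical metric with instance_id label
        service: custom           # hypothetical Yandex Monitoring service
        target: 30                # keep the zone-averaged value near 30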
+ type: number + type: object + type: array + initialSize: + description: The initial number of instances in the + instance group. + type: number + maxSize: + description: The maximum number of virtual machines + in the group. + type: number + measurementDuration: + description: The amount of time, in seconds, that + metrics are averaged for. If the average value at + the end of the interval is higher than the cpu_utilization_target, + the instance group will increase the number of virtual + machines in the group. + type: number + minZoneSize: + description: The minimum number of virtual machines + in a single availability zone. + type: number + stabilizationDuration: + description: The minimum time interval, in seconds, + to monitor the load before an instance group can + reduce the number of virtual machines in the group. + During this time, the group will not decrease even + if the average load falls below the value of cpu_utilization_target. + type: number + warmupDuration: + description: The warm-up time of the virtual machine, + in seconds. During this time, traffic is fed to + the virtual machine, but load metrics are not taken + into account. + type: number + type: object + type: array + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for this + instance group. + type: string + status: + description: The status of the instance. + type: string + variables: + additionalProperties: + type: string + description: A set of key/value variables pairs to assign to the + instance group. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_instanceiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instanceiambindings.yaml new file mode 100644 index 0000000..122500d --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instanceiambindings.yaml @@ -0,0 +1,384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: instanceiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: InstanceIAMBinding + listKind: InstanceIAMBindingList + plural: instanceiambindings + singular: instanceiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: InstanceIAMBinding is the Schema for the InstanceIAMBindings + API. Allows management of a single IAM binding for an instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceIAMBindingSpec defines the desired state of InstanceIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + instanceId: + description: ID of the instance to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_instance_iam_binding + can be used per role. 
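# --- Illustrative usage (hypothetical) ----------------------------------------
# A minimal sketch of the binding defined by this CRD: one role, one set of
# members, one instance. IDs and the role are invented; member entries follow
# the provider's "<type>:<id>" convention.
---
apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: InstanceIAMBinding
metadata:
  name: vm-operators
spec:
  forProvider:
    instanceId: fhmexample           # hypothetical instance ID
    role: compute.operator           # hypothetical role; one binding per role
    members:
      - serviceAccount:ajeexample    # hypothetical service account ID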
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + instanceId: + description: ID of the instance to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_instance_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
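# --- Illustrative usage (hypothetical) ----------------------------------------
# A sketch of the connection-secret wiring above: any connection details the
# resource exposes are written to the named Secret. Names are invented.
spec:
  writeConnectionSecretToRef:
    name: instance-conn
    namespace: crossplane-system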
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.instanceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceId) + || (has(self.initProvider) && has(self.initProvider.instanceId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: InstanceIAMBindingStatus defines the observed state of InstanceIAMBinding. + properties: + atProvider: + properties: + id: + type: string + instanceId: + description: ID of the instance to attach the policy to. + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_instance_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_instances.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instances.yaml new file mode 100644 index 0000000..64d57e0 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instances.yaml @@ -0,0 +1,2372 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: instances.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Instance + listKind: InstanceList + plural: instances + singular: instance + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Instance is the Schema for the Instances API. Manages a VM instance + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowRecreate: + description: Default 5 minutes + type: boolean + allowStoppingForUpdate: + description: If you try to update a property that requires stopping + the instance without setting this field, the update will fail. + type: boolean + bootDisk: + description: The boot disk for the instance. The structure is + documented below. + items: + properties: + autoDelete: + description: Defines whether the disk will be auto-deleted + when the instance is deleted. The default value is True. + type: boolean + deviceName: + description: Name that can be used to access an attached + disk. 
+ type: string + diskId: + description: The ID of the existing disk (such as those + managed by yandex_compute_disk) to attach as a boot disk. + type: string + diskIdRef: + description: Reference to a Disk to populate diskId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + diskIdSelector: + description: Selector for a Disk to populate diskId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + initializeParams: + description: Parameters for a new disk that will be created + alongside the new instance. Either initialize_params or + disk_id must be set. The structure is documented below. + items: + properties: + blockSize: + description: Block size of the disk, specified in + bytes. + type: number + description: + description: Description of the boot disk. + type: string + imageId: + description: A disk image to initialize this disk + from. + type: string + imageIdRef: + description: Reference to a Image to populate imageId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageIdSelector: + description: Selector for a Image to populate imageId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Resource name. + type: string + size: + description: Size of the disk in GB. + type: number + snapshotId: + description: A snapshot to initialize this disk from. + type: string + type: + description: Disk type. + type: string + type: object + type: array + mode: + description: Type of access to the disk resource. By default, + a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + description: + description: Description of the instance. + type: string + filesystem: + description: List of filesystems that are attached to the instance. + Structure is documented below. + items: + properties: + deviceName: + description: Name of the device representing the filesystem + on the instance. + type: string + filesystemId: + description: ID of the filesystem that should be attached. + type: string + mode: + description: Mode of access to the filesystem that should + be attached. By default, filesystem is attached in READ_WRITE + mode. + type: string + type: object + type: array + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + gpuClusterId: + description: ID of the GPU cluster to attach this instance to. + The GPU cluster must exist in the same zone as the instance. + type: string + hostname: + description: Host name for the instance. This field is used to + generate the instance fqdn value. The host name must be unique + within the network and region. If not specified, the host name + will be equal to id of the instance and fqdn will be .auto.internal. + Otherwise FQDN will be ..internal. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the instance. + type: object + x-kubernetes-map-type: granular + localDisk: + description: List of local disks that are attached to the instance. + Structure is documented below. + items: + properties: + sizeBytes: + description: Size of the disk, specified in bytes. + type: number + type: object + type: array + maintenanceGracePeriod: + description: Time between notification via metadata service and + maintenance. E.g., 60s. + type: string + maintenancePolicy: + description: 'Behaviour on maintenance events. The default is + unspecified. Values: unspecified, migrate, restart.' + type: string + metadata: + additionalProperties: + type: string + description: Metadata key/value pairs to make available from within + the instance. + type: object + x-kubernetes-map-type: granular + metadataOptions: + description: Options allow user to configure access to instance's + metadata + items: + properties: + awsV1HttpEndpoint: + type: number + awsV1HttpToken: + type: number + gceHttpEndpoint: + type: number + gceHttpToken: + type: number + type: object + type: array + name: + description: Resource name. + type: string + networkAccelerationType: + description: 'Type of network acceleration. The default is standard. + Values: standard, software_accelerated' + type: string + networkInterface: + description: Networks to attach to the instance. This can be specified + multiple times. The structure is documented below. + items: + properties: + dnsRecord: + description: List of configurations for creating ipv4 DNS + records. The structure is documented below. 
+ items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + index: + description: Index of network interface, will be calculated + automatically for instance create or update operations + if not specified. Required for attach/detach operations. + type: number + ipAddress: + description: The private IP address to assign to the instance. + If empty, the address will be automatically assigned from + the specified subnet. + type: string + ipv4: + description: Allocate an IPv4 address for the interface. + The default value is true. + type: boolean + ipv6: + description: If true, allocate an IPv6 address for the interface. + The address will be automatically assigned from the specified + subnet. + type: boolean + ipv6Address: + description: The private IPv6 address to assign to the instance. + type: string + ipv6DnsRecord: + description: List of configurations for creating ipv6 DNS + records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + nat: + description: Provide a public address, for instance, to + access the internet over NAT. + type: boolean + natDnsRecord: + description: List of configurations for creating ipv4 NAT + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + natIpAddress: + description: Provide a public address, for instance, to + access the internet over NAT. Address should be already + reserved in web UI. + type: string + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc + to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: ID of the subnet to attach this interface to. + The subnet must exist in the same zone where this instance + will be created. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + hostAffinityRules: + description: List of host affinity rules. The structure + is documented below. + items: + properties: + key: + description: Affinity label or one of reserved values + - yc.hostId, yc.hostGroupId. + type: string + op: + description: Affinity action. The only value supported + is IN. + type: string + values: + items: + type: string + type: array + type: object + type: array + placementGroupId: + description: Specifies the id of the Placement Group to + assign to the instance. + type: string + placementGroupPartition: + type: number + type: object + type: array + platformId: + description: The type of virtual machine to create. The default + is 'standard-v1'. + type: string + resources: + description: Compute resources that are allocated for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline performance + for a core as a percent. + type: number + cores: + description: CPU cores for the instance. + type: number + gpus: + description: If provided, specifies the number of GPU devices + for the instance + type: number + memory: + description: Memory size in GB. + type: number + type: object + type: array + schedulingPolicy: + description: Scheduling policy configuration. The structure is + documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. Defaults + to false. + type: boolean + type: object + type: array + secondaryDisk: + description: 'A set of disks to attach to the instance. The structure + is documented below. Note: The allow_stopping_for_update property + must be set to true in order to update this structure.' + items: + properties: + autoDelete: + description: Whether the disk is auto-deleted when the instance + is deleted. The default value is false. + type: boolean + deviceName: + description: Name that can be used to access an attached + disk under /dev/disk/by-id/. + type: string + diskId: + description: ID of the disk that is attached to the instance. + type: string + mode: + description: Type of access to the disk resource. By default, + a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + serviceAccountId: + description: ID of the service account authorized for this instance. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ serviceAccountIdSelector:
+ description: Selector for a ServiceAccount in iam to populate
+ serviceAccountId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ zone:
+ description: The availability zone where the virtual machine will
+ be created. If it is not provided, the default provider zone
+ is used.
+ type: string
+ type: object
+ initProvider:
+ description: |-
+ THIS IS A BETA FIELD. It will be honored
+ unless the Management Policies feature flag is disabled.
+ InitProvider holds the same fields as ForProvider, with the exception
+ of Identifier and other resource reference fields. The fields that are
+ in InitProvider are merged into ForProvider when the resource is created.
+ The same fields are also added to the terraform ignore_changes hook, to
+ avoid updating them after creation. This is useful for fields that are
+ required on creation, but we do not desire to update them after creation,
+ for example because an external controller is managing them, like an
+ autoscaler.
+ properties:
+ allowRecreate:
+ description: Default 5 minutes
+ type: boolean
+ allowStoppingForUpdate:
+ description: If you try to update a property that requires stopping
+ the instance without setting this field, the update will fail.
+ type: boolean
+ bootDisk:
+ description: The boot disk for the instance. The structure is
+ documented below.
+ items:
+ properties:
+ autoDelete:
+ description: Defines whether the disk will be auto-deleted
+ when the instance is deleted. The default value is True.
+ type: boolean
+ deviceName:
+ description: Name that can be used to access an attached
+ disk.
+ type: string
+ diskId:
+ description: The ID of the existing disk (such as those
+ managed by yandex_compute_disk) to attach as a boot disk.
+ type: string
+ diskIdRef:
+ description: Reference to a Disk to populate diskId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + diskIdSelector: + description: Selector for a Disk to populate diskId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + initializeParams: + description: Parameters for a new disk that will be created + alongside the new instance. Either initialize_params or + disk_id must be set. The structure is documented below. + items: + properties: + blockSize: + description: Block size of the disk, specified in + bytes. + type: number + description: + description: Description of the boot disk. + type: string + imageId: + description: A disk image to initialize this disk + from. + type: string + imageIdRef: + description: Reference to a Image to populate imageId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageIdSelector: + description: Selector for a Image to populate imageId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Resource name. + type: string + size: + description: Size of the disk in GB. + type: number + snapshotId: + description: A snapshot to initialize this disk from. + type: string + type: + description: Disk type. + type: string + type: object + type: array + mode: + description: Type of access to the disk resource. By default, + a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + description: + description: Description of the instance. + type: string + filesystem: + description: List of filesystems that are attached to the instance. + Structure is documented below. + items: + properties: + deviceName: + description: Name of the device representing the filesystem + on the instance. + type: string + filesystemId: + description: ID of the filesystem that should be attached. + type: string + mode: + description: Mode of access to the filesystem that should + be attached. By default, filesystem is attached in READ_WRITE + mode. + type: string + type: object + type: array + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + gpuClusterId: + description: ID of the GPU cluster to attach this instance to. + The GPU cluster must exist in the same zone as the instance. + type: string + hostname: + description: Host name for the instance. This field is used to + generate the instance fqdn value. The host name must be unique + within the network and region. If not specified, the host name + will be equal to id of the instance and fqdn will be .auto.internal. + Otherwise FQDN will be ..internal. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the instance. + type: object + x-kubernetes-map-type: granular + localDisk: + description: List of local disks that are attached to the instance. + Structure is documented below. + items: + properties: + sizeBytes: + description: Size of the disk, specified in bytes. + type: number + type: object + type: array + maintenanceGracePeriod: + description: Time between notification via metadata service and + maintenance. E.g., 60s. + type: string + maintenancePolicy: + description: 'Behaviour on maintenance events. The default is + unspecified. Values: unspecified, migrate, restart.' + type: string + metadata: + additionalProperties: + type: string + description: Metadata key/value pairs to make available from within + the instance. + type: object + x-kubernetes-map-type: granular + metadataOptions: + description: Options allow user to configure access to instance's + metadata + items: + properties: + awsV1HttpEndpoint: + type: number + awsV1HttpToken: + type: number + gceHttpEndpoint: + type: number + gceHttpToken: + type: number + type: object + type: array + name: + description: Resource name. + type: string + networkAccelerationType: + description: 'Type of network acceleration. The default is standard. + Values: standard, software_accelerated' + type: string + networkInterface: + description: Networks to attach to the instance. This can be specified + multiple times. The structure is documented below. + items: + properties: + dnsRecord: + description: List of configurations for creating ipv4 DNS + records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + index: + description: Index of network interface, will be calculated + automatically for instance create or update operations + if not specified. Required for attach/detach operations. + type: number + ipAddress: + description: The private IP address to assign to the instance. + If empty, the address will be automatically assigned from + the specified subnet. 
+ type: string + ipv4: + description: Allocate an IPv4 address for the interface. + The default value is true. + type: boolean + ipv6: + description: If true, allocate an IPv6 address for the interface. + The address will be automatically assigned from the specified + subnet. + type: boolean + ipv6Address: + description: The private IPv6 address to assign to the instance. + type: string + ipv6DnsRecord: + description: List of configurations for creating ipv6 DNS + records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + nat: + description: Provide a public address, for instance, to + access the internet over NAT. + type: boolean + natDnsRecord: + description: List of configurations for creating ipv4 NAT + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + natIpAddress: + description: Provide a public address, for instance, to + access the internet over NAT. Address should be already + reserved in web UI. + type: string + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc + to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: ID of the subnet to attach this interface to. + The subnet must exist in the same zone where this instance + will be created. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + hostAffinityRules: + description: List of host affinity rules. The structure + is documented below. + items: + properties: + key: + description: Affinity label or one of reserved values + - yc.hostId, yc.hostGroupId. + type: string + op: + description: Affinity action. The only value supported + is IN. 
+ type: string + values: + items: + type: string + type: array + type: object + type: array + placementGroupId: + description: Specifies the id of the Placement Group to + assign to the instance. + type: string + placementGroupPartition: + type: number + type: object + type: array + platformId: + description: The type of virtual machine to create. The default + is 'standard-v1'. + type: string + resources: + description: Compute resources that are allocated for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline performance + for a core as a percent. + type: number + cores: + description: CPU cores for the instance. + type: number + gpus: + description: If provided, specifies the number of GPU devices + for the instance + type: number + memory: + description: Memory size in GB. + type: number + type: object + type: array + schedulingPolicy: + description: Scheduling policy configuration. The structure is + documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. Defaults + to false. + type: boolean + type: object + type: array + secondaryDisk: + description: 'A set of disks to attach to the instance. The structure + is documented below. Note: The allow_stopping_for_update property + must be set to true in order to update this structure.' + items: + properties: + autoDelete: + description: Whether the disk is auto-deleted when the instance + is deleted. The default value is false. + type: boolean + deviceName: + description: Name that can be used to access an attached + disk under /dev/disk/by-id/. + type: string + diskId: + description: ID of the disk that is attached to the instance. + type: string + mode: + description: Type of access to the disk resource. By default, + a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + serviceAccountId: + description: ID of the service account authorized for this instance. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ zone:
+ description: The availability zone where the virtual machine will
+ be created. If it is not provided, the default provider zone
+ is used.
+ type: string
+ type: object
+ managementPolicies:
+ default:
+ - '*'
+ description: |-
+ THIS IS A BETA FIELD. It is on by default but can be opted out
+ through a Crossplane feature flag.
+ ManagementPolicies specify the array of actions Crossplane is allowed to
+ take on the managed and external resources.
+ This field is planned to replace the DeletionPolicy field in a future
+ release. Currently, both could be set independently and non-default
+ values would be honored if the feature flag is enabled. If both are
+ custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+ and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+ items:
+ description: |-
+ A ManagementAction represents an action that the Crossplane controllers
+ can take on an external resource.
+ enum:
+ - Observe
+ - Create
+ - Update
+ - Delete
+ - LateInitialize
+ - '*'
+ type: string
+ type: array
+ providerConfigRef:
+ default:
+ name: default
+ description: |-
+ ProviderConfigReference specifies how the provider that will be used to
+ create, observe, update, and delete this managed resource should be
+ configured.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ publishConnectionDetailsTo:
+ description: |-
+ PublishConnectionDetailsTo specifies the connection secret config which
+ contains a name, metadata and a reference to secret store config to
+ which any connection details for this managed resource should be written.
+ Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.bootDisk is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.bootDisk) + || (has(self.initProvider) && has(self.initProvider.bootDisk))' + - message: spec.forProvider.networkInterface is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.networkInterface) + || (has(self.initProvider) && has(self.initProvider.networkInterface))' + - message: spec.forProvider.resources is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resources) + || (has(self.initProvider) && has(self.initProvider.resources))' + status: + description: InstanceStatus defines the observed state of Instance. + properties: + atProvider: + properties: + allowRecreate: + description: Default 5 minutes + type: boolean + allowStoppingForUpdate: + description: If you try to update a property that requires stopping + the instance without setting this field, the update will fail. + type: boolean + bootDisk: + description: The boot disk for the instance. The structure is + documented below. + items: + properties: + autoDelete: + description: Defines whether the disk will be auto-deleted + when the instance is deleted. The default value is True. + type: boolean + deviceName: + description: Name that can be used to access an attached + disk. + type: string + diskId: + description: The ID of the existing disk (such as those + managed by yandex_compute_disk) to attach as a boot disk. + type: string + initializeParams: + description: Parameters for a new disk that will be created + alongside the new instance. Either initialize_params or + disk_id must be set. The structure is documented below. + items: + properties: + blockSize: + description: Block size of the disk, specified in + bytes. + type: number + description: + description: Description of the boot disk. + type: string + imageId: + description: A disk image to initialize this disk + from. + type: string + name: + description: Resource name. + type: string + size: + description: Size of the disk in GB. + type: number + snapshotId: + description: A snapshot to initialize this disk from. + type: string + type: + description: Disk type. + type: string + type: object + type: array + mode: + description: Type of access to the disk resource. By default, + a disk is attached in READ_WRITE mode. + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the instance. + type: string + description: + description: Description of the instance. + type: string + filesystem: + description: List of filesystems that are attached to the instance. + Structure is documented below. + items: + properties: + deviceName: + description: Name of the device representing the filesystem + on the instance. + type: string + filesystemId: + description: ID of the filesystem that should be attached. + type: string + mode: + description: Mode of access to the filesystem that should + be attached. By default, filesystem is attached in READ_WRITE + mode. 
+ type: string + type: object + type: array + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + fqdn: + description: The fully qualified DNS name of this instance. + type: string + gpuClusterId: + description: ID of the GPU cluster to attach this instance to. + The GPU cluster must exist in the same zone as the instance. + type: string + hardwareGeneration: + items: + properties: + generation2Features: + items: + type: object + type: array + legacyFeatures: + items: + properties: + pciTopology: + type: string + type: object + type: array + type: object + type: array + hostname: + description: Host name for the instance. This field is used to + generate the instance fqdn value. The host name must be unique + within the network and region. If not specified, the host name + will be equal to id of the instance and fqdn will be .auto.internal. + Otherwise FQDN will be ..internal. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the instance. + type: object + x-kubernetes-map-type: granular + localDisk: + description: List of local disks that are attached to the instance. + Structure is documented below. + items: + properties: + deviceName: + description: The name of the local disk device. + type: string + sizeBytes: + description: Size of the disk, specified in bytes. + type: number + type: object + type: array + maintenanceGracePeriod: + description: Time between notification via metadata service and + maintenance. E.g., 60s. + type: string + maintenancePolicy: + description: 'Behaviour on maintenance events. The default is + unspecified. Values: unspecified, migrate, restart.' + type: string + metadata: + additionalProperties: + type: string + description: Metadata key/value pairs to make available from within + the instance. + type: object + x-kubernetes-map-type: granular + metadataOptions: + description: Options allow user to configure access to instance's + metadata + items: + properties: + awsV1HttpEndpoint: + type: number + awsV1HttpToken: + type: number + gceHttpEndpoint: + type: number + gceHttpToken: + type: number + type: object + type: array + name: + description: Resource name. + type: string + networkAccelerationType: + description: 'Type of network acceleration. The default is standard. + Values: standard, software_accelerated' + type: string + networkInterface: + description: Networks to attach to the instance. This can be specified + multiple times. The structure is documented below. + items: + properties: + dnsRecord: + description: List of configurations for creating ipv4 DNS + records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private zone + used). + type: string + fqdn: + description: DNS record FQDN (must have a dot at the + end). + type: string + ptr: + description: When set to true, also create a PTR DNS + record. + type: boolean + ttl: + description: DNS record TTL. in seconds + type: number + type: object + type: array + index: + description: Index of network interface, will be calculated + automatically for instance create or update operations + if not specified. Required for attach/detach operations. + type: number + ipAddress: + description: The private IP address to assign to the instance. + If empty, the address will be automatically assigned from + the specified subnet. 
+                          type: string
+                        ipv4:
+                          description: Allocate an IPv4 address for the interface.
+                            The default value is true.
+                          type: boolean
+                        ipv6:
+                          description: If true, allocate an IPv6 address for the interface.
+                            The address will be automatically assigned from the specified
+                            subnet.
+                          type: boolean
+                        ipv6Address:
+                          description: The private IPv6 address to assign to the instance.
+                          type: string
+                        ipv6DnsRecord:
+                          description: List of configurations for creating ipv6 DNS
+                            records. The structure is documented below.
+                          items:
+                            properties:
+                              dnsZoneId:
+                                description: DNS zone ID (if not set, private zone
+                                  used).
+                                type: string
+                              fqdn:
+                                description: DNS record FQDN (must have a dot at the
+                                  end).
+                                type: string
+                              ptr:
+                                description: When set to true, also create a PTR DNS
+                                  record.
+                                type: boolean
+                              ttl:
+                                description: DNS record TTL, in seconds.
+                                type: number
+                            type: object
+                          type: array
+                        macAddress:
+                          type: string
+                        nat:
+                          description: Provide a public address, for instance, to
+                            access the internet over NAT.
+                          type: boolean
+                        natDnsRecord:
+                          description: List of configurations for creating ipv4 NAT
+                            DNS records. The structure is documented below.
+                          items:
+                            properties:
+                              dnsZoneId:
+                                description: DNS zone ID (if not set, private zone
+                                  used).
+                                type: string
+                              fqdn:
+                                description: DNS record FQDN (must have a dot at the
+                                  end).
+                                type: string
+                              ptr:
+                                description: When set to true, also create a PTR DNS
+                                  record.
+                                type: boolean
+                              ttl:
+                                description: DNS record TTL, in seconds.
+                                type: number
+                            type: object
+                          type: array
+                        natIpAddress:
+                          description: Provide a public address, for instance, to
+                            access the internet over NAT. The address must already
+                            be reserved in the web UI.
+                          type: string
+                        natIpVersion:
+                          type: string
+                        securityGroupIds:
+                          description: Security group IDs for the network interface.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: set
+                        subnetId:
+                          description: ID of the subnet to attach this interface to.
+                            The subnet must exist in the same zone where this instance
+                            will be created.
+                          type: string
+                      type: object
+                    type: array
+                  placementPolicy:
+                    description: The placement policy configuration. The structure
+                      is documented below.
+                    items:
+                      properties:
+                        hostAffinityRules:
+                          description: List of host affinity rules. The structure
+                            is documented below.
+                          items:
+                            properties:
+                              key:
+                                description: Affinity label or one of reserved values
+                                  - yc.hostId, yc.hostGroupId.
+                                type: string
+                              op:
+                                description: Affinity action. The only value supported
+                                  is IN.
+                                type: string
+                              values:
+                                items:
+                                  type: string
+                                type: array
+                            type: object
+                          type: array
+                        placementGroupId:
+                          description: Specifies the id of the Placement Group to
+                            assign to the instance.
+                          type: string
+                        placementGroupPartition:
+                          type: number
+                      type: object
+                    type: array
+                  platformId:
+                    description: The type of virtual machine to create. The default
+                      is 'standard-v1'.
+                    type: string
+                  resources:
+                    description: Compute resources that are allocated for the instance.
+                      The structure is documented below.
+                    items:
+                      properties:
+                        coreFraction:
+                          description: If provided, specifies baseline performance
+                            for a core as a percent.
+                          type: number
+                        cores:
+                          description: CPU cores for the instance.
+                          type: number
+                        gpus:
+                          description: If provided, specifies the number of GPU devices
+                            for the instance.
+                          type: number
+                        memory:
+                          description: Memory size in GB.
+                          type: number
+                      type: object
+                    type: array
+                  schedulingPolicy:
+                    description: Scheduling policy configuration. The structure is
+                      documented below.
+                    items:
+                      properties:
+                        preemptible:
+                          description: Specifies if the instance is preemptible. Defaults
+                            to false.
+                          type: boolean
+                      type: object
+                    type: array
+                  secondaryDisk:
+                    description: 'A set of disks to attach to the instance. The structure
+                      is documented below. Note: The allow_stopping_for_update property
+                      must be set to true in order to update this structure.'
+                    items:
+                      properties:
+                        autoDelete:
+                          description: Whether the disk is auto-deleted when the instance
+                            is deleted. The default value is false.
+                          type: boolean
+                        deviceName:
+                          description: Name that can be used to access an attached
+                            disk under /dev/disk/by-id/.
+                          type: string
+                        diskId:
+                          description: ID of the disk that is attached to the instance.
+                          type: string
+                        mode:
+                          description: Type of access to the disk resource. By default,
+                            a disk is attached in READ_WRITE mode.
+                          type: string
+                      type: object
+                    type: array
+                  serviceAccountId:
+                    description: ID of the service account authorized for this instance.
+                    type: string
+                  status:
+                    description: The status of this instance.
+                    type: string
+                  zone:
+                    description: The availability zone where the virtual machine will
+                      be created. If it is not provided, the default provider zone
+                      is used.
+                    type: string
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      type: integer
+                    reason:
+                      description: A Reason for this condition's last transition from
+                        one status to another.
+                      type: string
+                    status:
+                      description: Status of this condition; is it currently True,
+                        False, or Unknown?
+                      type: string
+                    type:
+                      description: |-
+                        Type of this condition. At most one of each condition type may apply to
+                        a resource at any point in time.
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+              observedGeneration:
+                description: |-
+                  ObservedGeneration is the latest metadata.generation
+                  which resulted in either a ready state, or stalled due to error
+                  it can not recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroupiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroupiambindings.yaml new file mode 100644 index 0000000..fbfaedf --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroupiambindings.yaml @@ -0,0 +1,386 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: placementgroupiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PlacementGroupIAMBinding + listKind: PlacementGroupIAMBindingList + plural: placementgroupiambindings + singular: placementgroupiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PlacementGroupIAMBinding is the Schema for the PlacementGroupIAMBindings + API. Allows management of a single IAM binding for a Placement Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlacementGroupIAMBindingSpec defines the desired state of + PlacementGroupIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + placementGroupId: + description: ID of the placement group to attach the policy to. + type: string + role: + description: The role that should be assigned. 
Only one yandex_compute_placement_group_iam_binding + can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + placementGroupId: + description: ID of the placement group to attach the policy to. + type: string + role: + description: The role that should be assigned. Only one yandex_compute_placement_group_iam_binding + can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.placementGroupId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.placementGroupId) + || (has(self.initProvider) && has(self.initProvider.placementGroupId))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: PlacementGroupIAMBindingStatus defines the observed state + of PlacementGroupIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + placementGroupId: + description: ID of the placement group to attach the policy to. + type: string + role: + description: The role that should be assigned. Only one yandex_compute_placement_group_iam_binding + can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroups.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroups.yaml new file mode 100644 index 0000000..bbfe739 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroups.yaml @@ -0,0 +1,558 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: placementgroups.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PlacementGroup + listKind: PlacementGroupList + plural: placementgroups + singular: placementgroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PlacementGroup is the Schema for the PlacementGroups API. Manages + a Placement group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlacementGroupSpec defines the desired state of PlacementGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the Placement Group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Placement + Group. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Placement Group. + type: string + placementStrategyPartitions: + description: A number of partitions in the placement strategy + with partitions policy of the Placement Group (conflicts with + placement_strategy_spread). + type: number + placementStrategySpread: + description: A placement strategy with spread policy of the Placement + Group. Should be true or unset (conflicts with placement_strategy_partitions). + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the Placement Group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. 
+ type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Placement + Group. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Placement Group. + type: string + placementStrategyPartitions: + description: A number of partitions in the placement strategy + with partitions policy of the Placement Group (conflicts with + placement_strategy_spread). + type: number + placementStrategySpread: + description: A placement strategy with spread policy of the Placement + Group. Should be true or unset (conflicts with placement_strategy_partitions). + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: PlacementGroupStatus defines the observed state of PlacementGroup. + properties: + atProvider: + properties: + createdAt: + type: string + description: + description: A description of the Placement Group. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Placement + Group. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Placement Group. + type: string + placementStrategyPartitions: + description: A number of partitions in the placement strategy + with partitions policy of the Placement Group (conflicts with + placement_strategy_spread). + type: number + placementStrategySpread: + description: A placement strategy with spread policy of the Placement + Group. Should be true or unset (conflicts with placement_strategy_partitions). + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotiambindings.yaml new file mode 100644 index 0000000..bf84382 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotiambindings.yaml @@ -0,0 +1,384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: snapshotiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SnapshotIAMBinding + listKind: SnapshotIAMBindingList + plural: snapshotiambindings + singular: snapshotiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SnapshotIAMBinding is the Schema for the SnapshotIAMBindings + API. Allows management of a single IAM binding for a Snapshot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotIAMBindingSpec defines the desired state of SnapshotIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_snapshot_iam_binding + can be used per role. + type: string + snapshotId: + description: ID of the snapshot to attach the policy to. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_snapshot_iam_binding + can be used per role. + type: string + snapshotId: + description: ID of the snapshot to attach the policy to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. 
+ properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + - message: spec.forProvider.snapshotId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.snapshotId) + || (has(self.initProvider) && has(self.initProvider.snapshotId))' + status: + description: SnapshotIAMBindingStatus defines the observed state of SnapshotIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_snapshot_iam_binding + can be used per role. + type: string + snapshotId: + description: ID of the snapshot to attach the policy to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshots.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshots.yaml new file mode 100644 index 0000000..69dea23 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshots.yaml @@ -0,0 +1,774 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: snapshots.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Snapshot + listKind: SnapshotList + plural: snapshots + singular: snapshot + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Snapshot is the Schema for the Snapshots API. Creates a new snapshot + of a disk. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotSpec defines the desired state of Snapshot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the resource. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hardwareGeneration: + description: |- + Hardware generation and its features, + which will be applied to the instance when this snapshot is used as a boot + disk source. Provide this property if you wish to override this value, which + otherwise is inherited from the source. The structure is documented below. + items: + properties: + generation2Features: + description: A newer hardware generation, which always uses + PCI_TOPOLOGY_V2 and UEFI boot. + items: + type: object + type: array + legacyFeatures: + description: 'Defines the first known hardware generation + and its features, which are:' + items: + properties: + pciTopology: + description: A variant of PCI topology, one of PCI_TOPOLOGY_V1 + or PCI_TOPOLOGY_V2. + type: string + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the snapshot. + type: object + x-kubernetes-map-type: granular + name: + description: A name for the resource. + type: string + sourceDiskId: + description: ID of the disk to create a snapshot from. + type: string + sourceDiskIdRef: + description: Reference to a Disk to populate sourceDiskId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceDiskIdSelector: + description: Selector for a Disk to populate sourceDiskId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the resource. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hardwareGeneration: + description: |- + Hardware generation and its features, + which will be applied to the instance when this snapshot is used as a boot + disk source. Provide this property if you wish to override this value, which + otherwise is inherited from the source. The structure is documented below. + items: + properties: + generation2Features: + description: A newer hardware generation, which always uses + PCI_TOPOLOGY_V2 and UEFI boot. + items: + type: object + type: array + legacyFeatures: + description: 'Defines the first known hardware generation + and its features, which are:' + items: + properties: + pciTopology: + description: A variant of PCI topology, one of PCI_TOPOLOGY_V1 + or PCI_TOPOLOGY_V2. + type: string + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the snapshot. + type: object + x-kubernetes-map-type: granular + name: + description: A name for the resource. + type: string + sourceDiskId: + description: ID of the disk to create a snapshot from. + type: string + sourceDiskIdRef: + description: Reference to a Disk to populate sourceDiskId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceDiskIdSelector: + description: Selector for a Disk to populate sourceDiskId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SnapshotStatus defines the observed state of Snapshot. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the snapshot. + type: string + description: + description: Description of the resource. 
+ type: string + diskSize: + description: Size of the disk when the snapshot was created, specified + in GB. + type: number + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + hardwareGeneration: + description: |- + Hardware generation and its features, + which will be applied to the instance when this snapshot is used as a boot + disk source. Provide this property if you wish to override this value, which + otherwise is inherited from the source. The structure is documented below. + items: + properties: + generation2Features: + description: A newer hardware generation, which always uses + PCI_TOPOLOGY_V2 and UEFI boot. + items: + type: object + type: array + legacyFeatures: + description: 'Defines the first known hardware generation + and its features, which are:' + items: + properties: + pciTopology: + description: A variant of PCI topology, one of PCI_TOPOLOGY_V1 + or PCI_TOPOLOGY_V2. + type: string + type: object + type: array + type: object + type: array + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the snapshot. + type: object + x-kubernetes-map-type: granular + name: + description: A name for the resource. + type: string + sourceDiskId: + description: ID of the disk to create a snapshot from. + type: string + storageSize: + description: Size of the snapshot, specified in GB. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
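For orientation, a minimal Snapshot manifest against the schema above might look like the sketch below. All names, labels, and the description are placeholders, and the sourceDiskIdRef entry assumes a managed Disk object named example-disk exists in the cluster:

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Snapshot
metadata:
  name: example-snapshot
spec:
  forProvider:
    name: example-snapshot
    description: Snapshot of example-disk
    # sourceDiskId could be set directly; here the ID is resolved
    # from a referenced Disk object instead.
    sourceDiskIdRef:
      name: example-disk
    labels:
      env: dev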
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotscheduleiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotscheduleiambindings.yaml new file mode 100644 index 0000000..20df284 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotscheduleiambindings.yaml @@ -0,0 +1,389 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: snapshotscheduleiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SnapshotScheduleIAMBinding + listKind: SnapshotScheduleIAMBindingList + plural: snapshotscheduleiambindings + singular: snapshotscheduleiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SnapshotScheduleIAMBinding is the Schema for the SnapshotScheduleIAMBindings + API. Allows management of a single IAM binding for a Snapshot Schedule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotScheduleIAMBindingSpec defines the desired state + of SnapshotScheduleIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_snapshot_schedule_iam_binding + can be used per role. 
+ type: string + snapshotScheduleId: + description: ID of the snapshot schedule to attach the policy + to. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_snapshot_schedule_iam_binding + can be used per role. + type: string + snapshotScheduleId: + description: ID of the snapshot schedule to attach the policy + to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + - message: spec.forProvider.snapshotScheduleId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.snapshotScheduleId) + || (has(self.initProvider) && has(self.initProvider.snapshotScheduleId))' + status: + description: SnapshotScheduleIAMBindingStatus defines the observed state + of SnapshotScheduleIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_compute_snapshot_schedule_iam_binding + can be used per role. + type: string + snapshotScheduleId: + description: ID of the snapshot schedule to attach the policy + to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
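The x-kubernetes-validations rules above enforce that members, role, and snapshotScheduleId are provided (in forProvider or initProvider) whenever the management policies allow Create or Update. A minimal manifest satisfying them might look like the following sketch; the role name, member identity, and schedule ID are illustrative placeholders, and the member entry assumes the usual Yandex Cloud serviceAccount:<id> convention:

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: SnapshotScheduleIAMBinding
metadata:
  name: example-schedule-binding
spec:
  forProvider:
    # Only one binding may be used per role on a given schedule.
    role: editor
    members:
      - serviceAccount:aje1example
    snapshotScheduleId: fd81example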
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotschedules.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotschedules.yaml new file mode 100644 index 0000000..e0d7d2f --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotschedules.yaml @@ -0,0 +1,845 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: snapshotschedules.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SnapshotSchedule + listKind: SnapshotScheduleList + plural: snapshotschedules + singular: snapshotschedule + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SnapshotSchedule is the Schema for the SnapshotSchedules API. + Creates a new snapshot schedule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotScheduleSpec defines the desired state of SnapshotSchedule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the resource. + type: string + diskIds: + description: IDs of the disk for snapshot schedule. + items: + type: string + type: array + x-kubernetes-list-type: set + diskIdsRefs: + description: References to Disk to populate diskIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + diskIdsSelector: + description: Selector for a list of Disk to populate diskIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the snapshot + schedule. + type: object + x-kubernetes-map-type: granular + name: + description: A name for the resource. + type: string + retentionPeriod: + description: 'Time duration applied to snapshots created by this + snapshot schedule. This is a signed sequence of decimal numbers, + each with optional fraction and a unit suffix. Valid time units + are "ns", "us" (or "µs"), "ms", "s", "m", "h". Examples: "300ms", + "1.5h" or "2h45m".' + type: string + schedulePolicy: + description: Schedule policy of the snapshot schedule. + items: + properties: + expression: + description: Cron expression to schedule snapshots (in cron + format "* * * * *"). + type: string + startAt: + description: Time to start the snapshot schedule (in format + RFC3339 "2006-01-02T15:04:05Z07:00"). If empty current + time will be used. Unlike an expression that specifies + regularity rules, the start_at parameter determines from + what point these rules will be applied. + type: string + type: object + type: array + snapshotCount: + description: Maximum number of snapshots for every disk of the + snapshot schedule. + type: number + snapshotSpec: + description: Additional attributes for snapshots created by this + snapshot schedule. + items: + properties: + description: + description: Description to assign to snapshots created + by this snapshot schedule. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + snapshots created by this snapshot schedule. + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the resource. + type: string + diskIds: + description: IDs of the disk for snapshot schedule. + items: + type: string + type: array + x-kubernetes-list-type: set + diskIdsRefs: + description: References to Disk to populate diskIds. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + diskIdsSelector: + description: Selector for a list of Disk to populate diskIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the snapshot + schedule. + type: object + x-kubernetes-map-type: granular + name: + description: A name for the resource. + type: string + retentionPeriod: + description: 'Time duration applied to snapshots created by this + snapshot schedule. This is a signed sequence of decimal numbers, + each with optional fraction and a unit suffix. Valid time units + are "ns", "us" (or "µs"), "ms", "s", "m", "h". Examples: "300ms", + "1.5h" or "2h45m".' + type: string + schedulePolicy: + description: Schedule policy of the snapshot schedule. + items: + properties: + expression: + description: Cron expression to schedule snapshots (in cron + format "* * * * *"). + type: string + startAt: + description: Time to start the snapshot schedule (in format + RFC3339 "2006-01-02T15:04:05Z07:00"). If empty current + time will be used. Unlike an expression that specifies + regularity rules, the start_at parameter determines from + what point these rules will be applied. + type: string + type: object + type: array + snapshotCount: + description: Maximum number of snapshots for every disk of the + snapshot schedule. + type: number + snapshotSpec: + description: Additional attributes for snapshots created by this + snapshot schedule. + items: + properties: + description: + description: Description to assign to snapshots created + by this snapshot schedule. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + snapshots created by this snapshot schedule. + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SnapshotScheduleStatus defines the observed state of SnapshotSchedule. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the snapshot schedule. + type: string + description: + description: Description of the resource. + type: string + diskIds: + description: IDs of the disk for snapshot schedule. + items: + type: string + type: array + x-kubernetes-list-type: set + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the snapshot + schedule. + type: object + x-kubernetes-map-type: granular + name: + description: A name for the resource. + type: string + retentionPeriod: + description: 'Time duration applied to snapshots created by this + snapshot schedule. This is a signed sequence of decimal numbers, + each with optional fraction and a unit suffix. Valid time units + are "ns", "us" (or "µs"), "ms", "s", "m", "h". Examples: "300ms", + "1.5h" or "2h45m".' + type: string + schedulePolicy: + description: Schedule policy of the snapshot schedule. + items: + properties: + expression: + description: Cron expression to schedule snapshots (in cron + format "* * * * *"). + type: string + startAt: + description: Time to start the snapshot schedule (in format + RFC3339 "2006-01-02T15:04:05Z07:00"). If empty current + time will be used. Unlike an expression that specifies + regularity rules, the start_at parameter determines from + what point these rules will be applied. + type: string + type: object + type: array + snapshotCount: + description: Maximum number of snapshots for every disk of the + snapshot schedule. + type: number + snapshotSpec: + description: Additional attributes for snapshots created by this + snapshot schedule. + items: + properties: + description: + description: Description to assign to snapshots created + by this snapshot schedule. 
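A minimal SnapshotSchedule sketch tying these fields together; the cron expression, snapshot count, and disk reference are placeholders, and diskIdsRefs assumes a managed Disk named example-disk:

apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: SnapshotSchedule
metadata:
  name: example-schedule
spec:
  forProvider:
    name: example-schedule
    schedulePolicy:
      # Standard five-field cron: daily at 02:00.
      - expression: "0 2 * * *"
    snapshotCount: 7
    diskIdsRefs:
      - name: example-disk
    snapshotSpec:
      - description: Created by example-schedule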
+ type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + snapshots created by this snapshot schedule. + type: object + x-kubernetes-map-type: granular + type: object + type: array + status: + description: The status of the snapshot schedule. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/container.yandex-cloud.upjet.crossplane.io_registries.yaml b/package/crds/container.yandex-cloud.upjet.crossplane.io_registries.yaml new file mode 100644 index 0000000..53920f9 --- /dev/null +++ b/package/crds/container.yandex-cloud.upjet.crossplane.io_registries.yaml @@ -0,0 +1,523 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: registries.container.yandex-cloud.upjet.crossplane.io +spec: + group: container.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Registry + listKind: RegistryList + plural: registries + singular: registry + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Registry is the Schema for the Registrys API. Creates a new container + registry. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RegistrySpec defines the desired state of Registry + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the registry. + type: object + x-kubernetes-map-type: granular + name: + description: A name of the registry. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the registry. + type: object + x-kubernetes-map-type: granular + name: + description: A name of the registry. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
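The managementPolicies array above is what enables observe-only usage: with ["Observe"], Crossplane tracks an existing registry without creating, updating, or deleting it. A minimal sketch, assuming the existing registry ID is supplied through the crossplane.io/external-name annotation that the EXTERNAL-NAME printer column above reads; the ID value is illustrative:

apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Registry
metadata:
  name: imported-registry
  annotations:
    crossplane.io/external-name: crp0example  # pre-existing registry ID (illustrative)
spec:
  managementPolicies: ["Observe"]
  forProvider: {}                             # observed state lands in status.atProvider
  providerConfigRef:
    name: default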
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: RegistryStatus defines the observed state of Registry. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the registry. + type: string + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the registry. + type: object + x-kubernetes-map-type: granular + name: + description: A name of the registry. + type: string + status: + description: Status of the registry. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/container.yandex-cloud.upjet.crossplane.io_registryiambindings.yaml b/package/crds/container.yandex-cloud.upjet.crossplane.io_registryiambindings.yaml new file mode 100644 index 0000000..935a1c1 --- /dev/null +++ b/package/crds/container.yandex-cloud.upjet.crossplane.io_registryiambindings.yaml @@ -0,0 +1,683 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: registryiambindings.container.yandex-cloud.upjet.crossplane.io +spec: + group: container.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: RegistryIAMBinding + listKind: RegistryIAMBindingList + plural: registryiambindings + singular: registryiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RegistryIAMBinding is the Schema for the RegistryIAMBindings + API. Allows management of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
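With the Registry schema complete, a minimal managed Registry might look like this sketch (values illustrative). The folderIdSelector resolves folderId from a resourcemanager Folder according to the reference policies described above:

apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Registry
metadata:
  name: example-registry
spec:
  forProvider:
    name: my-registry
    labels:
      env: dev
    folderIdSelector:            # resolves folderId from a matching Folder
      matchLabels:
        purpose: registries
  providerConfigRef:
    name: default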
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RegistryIAMBindingSpec defines the desired state of RegistryIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + registryId: + description: The Yandex Container Registry ID to apply a binding + to. + type: string + registryIdRef: + description: Reference to a Registry to populate registryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + registryIdSelector: + description: Selector for a Registry to populate registryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + role: + description: The role that should be applied. See roles. 
+ type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + registryId: + description: The Yandex Container Registry ID to apply a binding + to. + type: string + registryIdRef: + description: Reference to a Registry to populate registryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + registryIdSelector: + description: Selector for a Registry to populate registryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: RegistryIAMBindingStatus defines the observed state of RegistryIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + registryId: + description: The Yandex Container Registry ID to apply a binding + to. 
+ type: string + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/container.yandex-cloud.upjet.crossplane.io_registryippermissions.yaml b/package/crds/container.yandex-cloud.upjet.crossplane.io_registryippermissions.yaml new file mode 100644 index 0000000..5567bf6 --- /dev/null +++ b/package/crds/container.yandex-cloud.upjet.crossplane.io_registryippermissions.yaml @@ -0,0 +1,523 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: registryippermissions.container.yandex-cloud.upjet.crossplane.io +spec: + group: container.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: RegistryIPPermission + listKind: RegistryIPPermissionList + plural: registryippermissions + singular: registryippermission + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RegistryIPPermission is the Schema for the RegistryIPPermissions + API. Creates a new Container Registry IP Permission. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
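The x-kubernetes-validations rule above enforces that role is set whenever the management policies allow Create or Update. A minimal binding granting pull access might look like the sketch below; the role name and the serviceAccount:&lt;id&gt; member format follow Yandex IAM conventions and are illustrative assumptions, not values taken from this diff:

apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: RegistryIAMBinding
metadata:
  name: example-puller
spec:
  forProvider:
    role: container-registry.images.puller
    members:
      - "serviceAccount:ajeexampleid"   # serviceAccount:<service-account-id> (illustrative)
    registryIdRef:
      name: example-registry            # resolves registryId from the Registry above
  providerConfigRef:
    name: default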
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RegistryIPPermissionSpec defines the desired state of RegistryIPPermission + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + pull: + description: List of configured CIDRs, from which pull is allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + push: + description: List of configured CIDRs, from which push is allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + registryId: + description: The ID of the registry that ip restrictions applied + to. + type: string + registryIdRef: + description: Reference to a Registry to populate registryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + registryIdSelector: + description: Selector for a Registry to populate registryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + pull: + description: List of configured CIDRs, from which pull is allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + push: + description: List of configured CIDRs, from which push is allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + registryId: + description: The ID of the registry that ip restrictions applied + to. + type: string + registryIdRef: + description: Reference to a Registry to populate registryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + registryIdSelector: + description: Selector for a Registry to populate registryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: RegistryIPPermissionStatus defines the observed state of + RegistryIPPermission. + properties: + atProvider: + properties: + id: + type: string + pull: + description: List of configured CIDRs, from which pull is allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + push: + description: List of configured CIDRs, from which push is allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + registryId: + description: The ID of the registry that ip restrictions applied + to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/container.yandex-cloud.upjet.crossplane.io_repositories.yaml b/package/crds/container.yandex-cloud.upjet.crossplane.io_repositories.yaml new file mode 100644 index 0000000..ca64f92 --- /dev/null +++ b/package/crds/container.yandex-cloud.upjet.crossplane.io_repositories.yaml @@ -0,0 +1,340 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: repositories.container.yandex-cloud.upjet.crossplane.io +spec: + group: container.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Repository + listKind: RepositoryList + plural: repositories + singular: repository + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Repository is the Schema for the Repositorys API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RepositorySpec defines the desired state of Repository + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
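A minimal RegistryIPPermission restricting pulls and pushes to private ranges might look like the following sketch (CIDRs illustrative):

apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: RegistryIPPermission
metadata:
  name: example-ip-permission
spec:
  forProvider:
    pull:
      - 10.0.0.0/16    # CIDRs allowed to pull
    push:
      - 10.0.1.0/24    # CIDRs allowed to push
    registryIdRef:
      name: example-registry
  providerConfigRef:
    name: default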
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: (String) + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: (String) + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: RepositoryStatus defines the observed state of Repository. + properties: + atProvider: + properties: + id: + description: (String) The ID of this resource. + type: string + name: + description: (String) + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
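
Putting the schema above together, a minimal Repository manifest might look as follows. Note that the x-kubernetes-validations rule above enforces spec.forProvider.name whenever the management policies include Create, Update, or '*'. Names and the providerConfig are illustrative:

---
apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Repository
metadata:
  name: example-repository                 # hypothetical
spec:
  forProvider:
    name: example-registry/example-image   # hypothetical; required by the CEL rule above
  providerConfigRef:
    name: default
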
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/container.yandex-cloud.upjet.crossplane.io_repositoryiambindings.yaml b/package/crds/container.yandex-cloud.upjet.crossplane.io_repositoryiambindings.yaml new file mode 100644 index 0000000..cd2fcb7 --- /dev/null +++ b/package/crds/container.yandex-cloud.upjet.crossplane.io_repositoryiambindings.yaml @@ -0,0 +1,682 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: repositoryiambindings.container.yandex-cloud.upjet.crossplane.io +spec: + group: container.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: RepositoryIAMBinding + listKind: RepositoryIAMBindingList + plural: repositoryiambindings + singular: repositoryiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RepositoryIAMBinding is the Schema for the RepositoryIAMBindings + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RepositoryIAMBindingSpec defines the desired state of RepositoryIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryId: + description: (String) + type: string + repositoryIdRef: + description: Reference to a Repository to populate repositoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
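
The EXTERNAL-NAME printer column above is read from the crossplane.io/external-name annotation. Setting that annotation by hand is one way to point a managed resource at a pre-existing external object; the ID below is a hypothetical placeholder:

---
apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Repository
metadata:
  name: adopted-repository                    # hypothetical
  annotations:
    crossplane.io/external-name: crp1example  # hypothetical existing repository ID
spec:
  managementPolicies: ["Observe"]             # observe-only: never mutate the external object
  forProvider: {}                             # name not required when only Observe is allowed
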
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + repositoryIdSelector: + description: Selector for a Repository to populate repositoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + role: + description: (String) + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
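
As the fields above show, repositoryId can be set directly, resolved from a named Repository via repositoryIdRef, or matched by labels via repositoryIdSelector; matchControllerRef restricts the match to objects that share the selecting object's controller, which is useful inside Compositions. A sketch of the two reference styles, both of which live under spec.forProvider and are alternatives (object names are hypothetical):

# Explicit reference to a Repository managed in the same cluster:
repositoryIdRef:
  name: example-repository        # hypothetical Repository object
# ...or label-based selection:
repositoryIdSelector:
  matchControllerRef: true
  matchLabels:
    team: registry                # hypothetical label
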
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + description: (Number) + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryId: + description: (String) + type: string + repositoryIdRef: + description: Reference to a Repository to populate repositoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + repositoryIdSelector: + description: Selector for a Repository to populate repositoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + role: + description: (String) + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + description: (Number) + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
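
A sketch of the connection-secret plumbing described above, placed under spec of any of these managed resources; the secret name and label are hypothetical, and configRef falls back to the default secret store config when omitted:

publishConnectionDetailsTo:
  name: repo-binding-conn         # hypothetical connection secret name
  configRef:
    name: default
  metadata:
    labels:
      app: registry-tooling       # hypothetical label
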
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: RepositoryIAMBindingStatus defines the observed state of + RepositoryIAMBinding. + properties: + atProvider: + properties: + id: + description: (String) The ID of this resource. + type: string + members: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryId: + description: (String) + type: string + role: + description: (String) + type: string + sleepAfter: + description: (Number) + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
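
A complete RepositoryIAMBinding sketch; spec.forProvider.role is enforced by the CEL rule above. The role ID and the serviceAccount:<id> member format follow the provider's usual conventions, but treat the concrete values here as assumptions:

---
apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: RepositoryIAMBinding
metadata:
  name: example-pullers                        # hypothetical
spec:
  forProvider:
    role: container-registry.images.puller     # assumed role ID
    members:
    - serviceAccount:aje1exampleserviceacct    # hypothetical member
    repositoryIdRef:
      name: example-repository                 # Repository object from the earlier sketch
  providerConfigRef:
    name: default
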
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+              observedGeneration:
+                description: |-
+                  ObservedGeneration is the latest metadata.generation
+                  which resulted in either a ready state, or stalled due to error
+                  it can not recover from without human intervention.
+                format: int64
+                type: integer
+            type: object
+          required:
+          - spec
+          type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/container.yandex-cloud.upjet.crossplane.io_repositorylifecyclepolicies.yaml b/package/crds/container.yandex-cloud.upjet.crossplane.io_repositorylifecyclepolicies.yaml
new file mode 100644
index 0000000..5ed3d1b
--- /dev/null
+++ b/package/crds/container.yandex-cloud.upjet.crossplane.io_repositorylifecyclepolicies.yaml
@@ -0,0 +1,584 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: repositorylifecyclepolicies.container.yandex-cloud.upjet.crossplane.io
+spec:
+  group: container.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: RepositoryLifecyclePolicy
+    listKind: RepositoryLifecyclePolicyList
+    plural: repositorylifecyclepolicies
+    singular: repositorylifecyclepolicy
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: RepositoryLifecyclePolicy is the Schema for the RepositoryLifecyclePolicies
+          API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: RepositoryLifecyclePolicySpec defines the desired state of
+              RepositoryLifecyclePolicy
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: (String) + type: string + name: + description: (String) + type: string + repositoryId: + description: (String) + type: string + repositoryIdRef: + description: Reference to a Repository to populate repositoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + repositoryIdSelector: + description: Selector for a Repository to populate repositoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: (Block List) (see below for nested schema) + items: + properties: + description: + description: (String) + type: string + expirePeriod: + description: (String) + type: string + retainedTop: + description: (Number) + type: number + tagRegexp: + description: (String) + type: string + untagged: + description: (Boolean) + type: boolean + type: object + type: array + status: + description: (String) + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
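
As the initProvider text above explains, fields set there are merged into forProvider at creation time and then added to the terraform ignore_changes hook. A spec-only sketch for a RepositoryLifecyclePolicy whose name is fixed at creation but afterwards left alone (all values hypothetical; see the full example further below):

spec:
  initProvider:
    name: initial-policy-name   # applied on create, then ignored on updates
  forProvider:
    status: active              # assumed status value
    repositoryIdRef:
      name: example-repository  # hypothetical Repository object
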
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: (String) + type: string + name: + description: (String) + type: string + repositoryId: + description: (String) + type: string + repositoryIdRef: + description: Reference to a Repository to populate repositoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + repositoryIdSelector: + description: Selector for a Repository to populate repositoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: (Block List) (see below for nested schema) + items: + properties: + description: + description: (String) + type: string + expirePeriod: + description: (String) + type: string + retainedTop: + description: (Number) + type: number + tagRegexp: + description: (String) + type: string + untagged: + description: (Boolean) + type: boolean + type: object + type: array + status: + description: (String) + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.status is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.status) + || (has(self.initProvider) && has(self.initProvider.status))' + status: + description: RepositoryLifecyclePolicyStatus defines the observed state + of RepositoryLifecyclePolicy. + properties: + atProvider: + properties: + createdAt: + description: (String) + type: string + description: + description: (String) + type: string + id: + description: (String) The ID of this resource. + type: string + name: + description: (String) + type: string + repositoryId: + description: (String) + type: string + rule: + description: (Block List) (see below for nested schema) + items: + properties: + description: + description: (String) + type: string + expirePeriod: + description: (String) + type: string + retainedTop: + description: (Number) + type: number + tagRegexp: + description: (String) + type: string + untagged: + description: (Boolean) + type: boolean + type: object + type: array + status: + description: (String) + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
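
A complete RepositoryLifecyclePolicy sketch exercising the rule block above; spec.forProvider.status is required by the CEL rule. The "48h" duration format and the "active" status value mirror the underlying Terraform resource but are assumptions here:

---
apiVersion: container.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: RepositoryLifecyclePolicy
metadata:
  name: example-cleanup                 # hypothetical
spec:
  forProvider:
    name: example-cleanup
    status: active                      # assumed value; required by the CEL rule above
    repositoryIdRef:
      name: example-repository
    rule:
    - description: expire untagged images after two days
      expirePeriod: 48h                 # assumed duration format
      untagged: true
    - description: keep only the newest ten release tags
      tagRegexp: release-.*
      retainedTop: 10
  providerConfigRef:
    name: default
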
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dataproc.yandex-cloud.upjet.crossplane.io_clusters.yaml b/package/crds/dataproc.yandex-cloud.upjet.crossplane.io_clusters.yaml new file mode 100644 index 0000000..2d295b8 --- /dev/null +++ b/package/crds/dataproc.yandex-cloud.upjet.crossplane.io_clusters.yaml @@ -0,0 +1,1567 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusters.dataproc.yandex-cloud.upjet.crossplane.io +spec: + group: dataproc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages a Data Proc + cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the Object Storage bucket to use for Data + Proc jobs. Data Proc Agent saves output of job driver's process + to specified bucket. In order for this to work service account + (specified by the service_account_id argument) should be given + permission to create objects within this bucket. + type: string + bucketRef: + description: Reference to a Bucket in storage to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + clusterConfig: + description: Configuration and resources for hosts that should + be created with the cluster. The structure is documented below. + items: + properties: + hadoop: + description: Data Proc specific options. The structure is + documented below. + items: + properties: + initializationAction: + description: List of initialization scripts. The structure + is documented below. + items: + properties: + args: + description: List of arguments of the initialization + script. + items: + type: string + type: array + timeout: + description: Script execution timeout, in seconds. + type: string + uri: + description: Script URI. 
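
The hadoop block documented above configures services, service properties, SSH keys, and initialization scripts. A sketch of that subtree as it would appear under spec.forProvider; the service names, property key, and URIs are illustrative, not verified values:

clusterConfig:
- versionId: "2.0"                   # assumed image version
  hadoop:
  - services: [HDFS, YARN, SPARK]    # illustrative service list
    properties:
      "yarn:resourcemanager.am.max-attempts": "5"   # illustrative property key
    sshPublicKeys:
    - ssh-ed25519 AAAA... user@host  # hypothetical key
    initializationAction:
    - uri: s3a://bucket/scripts/bootstrap.sh        # hypothetical script URI
      timeout: "600"
      args: ["--flag"]
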
+ type: string + type: object + type: array + properties: + additionalProperties: + type: string + description: A set of key/value pairs that are used + to configure cluster services. + type: object + x-kubernetes-map-type: granular + services: + description: List of services to run on Data Proc + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sshPublicKeys: + description: List of SSH public keys to put to the + hosts of the cluster. For information on how to + connect to the cluster, see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + subclusterSpec: + description: Configuration of the Data Proc subcluster. + The structure is documented below. + items: + properties: + assignPublicIp: + description: If true then assign public IP addresses + to the hosts of the subclusters. + type: boolean + autoscalingConfig: + description: Autoscaling configuration for compute + subclusters. + items: + properties: + cpuUtilizationTarget: + description: Defines an autoscaling rule based + on the average CPU utilization of the instance + group. If not set default autoscaling metric + will be used. + type: string + decommissionTimeout: + description: Timeout to gracefully decommission + nodes during downscaling. In seconds. + type: string + maxHostsCount: + description: Maximum number of nodes in autoscaling + subclusters. + type: number + measurementDuration: + description: Time in seconds allotted for averaging + metrics. + type: string + preemptible: + description: Bool flag -- whether to use preemptible + compute instances. Preemptible instances are + stopped at least once every 24 hours, and + can be stopped at any time if their resources + are needed by Compute. For more information, + see Preemptible Virtual Machines. + type: boolean + stabilizationDuration: + description: Minimum amount of time in seconds + allotted for monitoring before Instance Groups + can reduce the number of instances in the + group. During this time, the group size doesn't + decrease, even if the new metric values indicate + that it should. + type: string + warmupDuration: + description: The warmup time of the instance + in seconds. During this time, traffic is sent + to the instance, but instance metrics are + not collected. + type: string + type: object + type: array + hostsCount: + description: Number of hosts within Data Proc subcluster. + type: number + name: + description: Name of the Data Proc subcluster. + type: string + resources: + description: Resources allocated to each host of the + Data Proc subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of a host. + One of network-hdd (default) or network-ssd. + type: string + resourcePresetId: + description: The ID of the preset for computational + resources available to a host. All available + presets are listed in the documentation. + type: string + type: object + type: array + role: + description: Role of the subcluster in the Data Proc + cluster. + type: string + subnetId: + description: The ID of the subnet, to which hosts + of the subcluster belong. Subnets of all the subclusters + must belong to the same VPC network. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
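
A sketch of one autoscaled compute subcluster entry built from the fields above, nested under clusterConfig[0].subclusterSpec; the role and preset values are assumptions:

subclusterSpec:
- name: compute-autoscaled         # hypothetical
  role: COMPUTENODE                # assumed role value
  hostsCount: 1
  assignPublicIp: false
  resources:
  - resourcePresetId: s2.micro     # assumed preset ID
    diskTypeId: network-ssd
    diskSize: 60
  autoscalingConfig:
  - maxHostsCount: 5
    measurementDuration: "60"
    stabilizationDuration: "120"
    preemptible: true
  subnetIdRef:
    name: example-subnet           # hypothetical Subnet object
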
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + versionId: + description: Version of Data Proc image. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Data Proc cluster. + type: string + folderId: + description: ID of the folder to create a cluster in. If it is + not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostGroupIds: + description: A list of host group IDs to place VMs of the cluster + on. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Data + Proc cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of a specific Data Proc cluster. + type: string + securityGroupIds: + description: A list of security group IDs that the cluster belongs + to. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: Service account to be used by the Data Proc agent + to access resources of Yandex.Cloud. Selected service account + should have mdb.dataproc.agent role on the folder where the + Data Proc cluster will be located. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + uiProxy: + description: Whether to enable UI Proxy feature. + type: boolean + zoneId: + description: ID of the availability zone to create cluster in. + If it is not provided, the default provider zone is used. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the Object Storage bucket to use for Data + Proc jobs. Data Proc Agent saves output of job driver's process + to specified bucket. In order for this to work service account + (specified by the service_account_id argument) should be given + permission to create objects within this bucket. + type: string + bucketRef: + description: Reference to a Bucket in storage to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + clusterConfig: + description: Configuration and resources for hosts that should + be created with the cluster. The structure is documented below. + items: + properties: + hadoop: + description: Data Proc specific options. The structure is + documented below. + items: + properties: + initializationAction: + description: List of initialization scripts. The structure + is documented below. + items: + properties: + args: + description: List of arguments of the initialization + script. + items: + type: string + type: array + timeout: + description: Script execution timeout, in seconds. + type: string + uri: + description: Script URI. + type: string + type: object + type: array + properties: + additionalProperties: + type: string + description: A set of key/value pairs that are used + to configure cluster services. + type: object + x-kubernetes-map-type: granular + services: + description: List of services to run on Data Proc + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sshPublicKeys: + description: List of SSH public keys to put to the + hosts of the cluster. For information on how to + connect to the cluster, see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + subclusterSpec: + description: Configuration of the Data Proc subcluster. + The structure is documented below. + items: + properties: + assignPublicIp: + description: If true then assign public IP addresses + to the hosts of the subclusters. + type: boolean + autoscalingConfig: + description: Autoscaling configuration for compute + subclusters. + items: + properties: + cpuUtilizationTarget: + description: Defines an autoscaling rule based + on the average CPU utilization of the instance + group. If not set default autoscaling metric + will be used. + type: string + decommissionTimeout: + description: Timeout to gracefully decommission + nodes during downscaling. In seconds. + type: string + maxHostsCount: + description: Maximum number of nodes in autoscaling + subclusters. + type: number + measurementDuration: + description: Time in seconds allotted for averaging + metrics. + type: string + preemptible: + description: Bool flag -- whether to use preemptible + compute instances. Preemptible instances are + stopped at least once every 24 hours, and + can be stopped at any time if their resources + are needed by Compute. For more information, + see Preemptible Virtual Machines. 
+ type: boolean + stabilizationDuration: + description: Minimum amount of time in seconds + allotted for monitoring before Instance Groups + can reduce the number of instances in the + group. During this time, the group size doesn't + decrease, even if the new metric values indicate + that it should. + type: string + warmupDuration: + description: The warmup time of the instance + in seconds. During this time, traffic is sent + to the instance, but instance metrics are + not collected. + type: string + type: object + type: array + hostsCount: + description: Number of hosts within Data Proc subcluster. + type: number + name: + description: Name of the Data Proc subcluster. + type: string + resources: + description: Resources allocated to each host of the + Data Proc subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of a host. + One of network-hdd (default) or network-ssd. + type: string + resourcePresetId: + description: The ID of the preset for computational + resources available to a host. All available + presets are listed in the documentation. + type: string + type: object + type: array + role: + description: Role of the subcluster in the Data Proc + cluster. + type: string + subnetId: + description: The ID of the subnet, to which hosts + of the subcluster belong. Subnets of all the subclusters + must belong to the same VPC network. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + versionId: + description: Version of Data Proc image. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Data Proc cluster. + type: string + folderId: + description: ID of the folder to create a cluster in. If it is + not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostGroupIds: + description: A list of host group IDs to place VMs of the cluster + on. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Data + Proc cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of a specific Data Proc cluster. + type: string + securityGroupIds: + description: A list of security group IDs that the cluster belongs + to. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: Service account to be used by the Data Proc agent + to access resources of Yandex.Cloud. Selected service account + should have mdb.dataproc.agent role on the folder where the + Data Proc cluster will be located. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + uiProxy: + description: Whether to enable UI Proxy feature. + type: boolean + zoneId: + description: ID of the availability zone to create cluster in. + If it is not provided, the default provider zone is used. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for the connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to the connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to the Secret Store implementation for other store types.
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to the connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to the Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterConfig) + || (has(self.initProvider) && has(self.initProvider.clusterConfig))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + bucket: + description: Name of the Object Storage bucket to use for Data + Proc jobs. Data Proc Agent saves output of job driver's process + to specified bucket. In order for this to work service account + (specified by the service_account_id argument) should be given + permission to create objects within this bucket. + type: string + clusterConfig: + description: Configuration and resources for hosts that should + be created with the cluster. The structure is documented below. + items: + properties: + hadoop: + description: Data Proc specific options. The structure is + documented below. + items: + properties: + initializationAction: + description: List of initialization scripts. The structure + is documented below. + items: + properties: + args: + description: List of arguments of the initialization + script. + items: + type: string + type: array + timeout: + description: Script execution timeout, in seconds. + type: string + uri: + description: Script URI. + type: string + type: object + type: array + properties: + additionalProperties: + type: string + description: A set of key/value pairs that are used + to configure cluster services. + type: object + x-kubernetes-map-type: granular + services: + description: List of services to run on Data Proc + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sshPublicKeys: + description: List of SSH public keys to put to the + hosts of the cluster.
For information on how to + connect to the cluster, see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + subclusterSpec: + description: Configuration of the Data Proc subcluster. + The structure is documented below. + items: + properties: + assignPublicIp: + description: If true then assign public IP addresses + to the hosts of the subclusters. + type: boolean + autoscalingConfig: + description: Autoscaling configuration for compute + subclusters. + items: + properties: + cpuUtilizationTarget: + description: Defines an autoscaling rule based + on the average CPU utilization of the instance + group. If not set default autoscaling metric + will be used. + type: string + decommissionTimeout: + description: Timeout to gracefully decommission + nodes during downscaling. In seconds. + type: string + maxHostsCount: + description: Maximum number of nodes in autoscaling + subclusters. + type: number + measurementDuration: + description: Time in seconds allotted for averaging + metrics. + type: string + preemptible: + description: Bool flag -- whether to use preemptible + compute instances. Preemptible instances are + stopped at least once every 24 hours, and + can be stopped at any time if their resources + are needed by Compute. For more information, + see Preemptible Virtual Machines. + type: boolean + stabilizationDuration: + description: Minimum amount of time in seconds + allotted for monitoring before Instance Groups + can reduce the number of instances in the + group. During this time, the group size doesn't + decrease, even if the new metric values indicate + that it should. + type: string + warmupDuration: + description: The warmup time of the instance + in seconds. During this time, traffic is sent + to the instance, but instance metrics are + not collected. + type: string + type: object + type: array + hostsCount: + description: Number of hosts within Data Proc subcluster. + type: number + id: + description: (Computed) ID of a new Data Proc cluster. + type: string + name: + description: Name of the Data Proc subcluster. + type: string + resources: + description: Resources allocated to each host of the + Data Proc subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of a host. + One of network-hdd (default) or network-ssd. + type: string + resourcePresetId: + description: The ID of the preset for computational + resources available to a host. All available + presets are listed in the documentation. + type: string + type: object + type: array + role: + description: Role of the subcluster in the Data Proc + cluster. + type: string + subnetId: + description: The ID of the subnet, to which hosts + of the subcluster belong. Subnets of all the subclusters + must belong to the same VPC network. + type: string + type: object + type: array + versionId: + description: Version of Data Proc image. + type: string + type: object + type: array + createdAt: + description: (Computed) The Data Proc cluster creation timestamp. + type: string + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Data Proc cluster. + type: string + folderId: + description: ID of the folder to create a cluster in. 
If it is + not provided, the default provider folder is used. + type: string + hostGroupIds: + description: A list of host group IDs to place VMs of the cluster + on. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: (Computed) ID of a new Data Proc cluster. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Data + Proc cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of a specific Data Proc cluster. + type: string + securityGroupIds: + description: A list of security group IDs that the cluster belongs + to. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: Service account to be used by the Data Proc agent + to access resources of Yandex.Cloud. Selected service account + should have mdb.dataproc.agent role on the folder where the + Data Proc cluster will be located. + type: string + uiProxy: + description: Whether to enable UI Proxy feature. + type: boolean + zoneId: + description: ID of the availability zone to create cluster in. + If it is not provided, the default provider zone is used. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/datatransfer.yandex-cloud.upjet.crossplane.io_endpoints.yaml b/package/crds/datatransfer.yandex-cloud.upjet.crossplane.io_endpoints.yaml new file mode 100644 index 0000000..fd25c76 --- /dev/null +++ b/package/crds/datatransfer.yandex-cloud.upjet.crossplane.io_endpoints.yaml @@ -0,0 +1,10216 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: endpoints.datatransfer.yandex-cloud.upjet.crossplane.io +spec: + group: datatransfer.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Endpoint + listKind: EndpointList + plural: endpoints + singular: endpoint + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Endpoint is the Schema for the Endpoints API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EndpointSpec defines the desired state of Endpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: (String) + type: string + folderId: + description: (String) + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: (Map of String) + type: object + x-kubernetes-map-type: granular + name: + description: (String) + type: string + settings: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + clickhouseSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + clickhouseClusterName: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + database: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + httpPort: + description: (Number) + type: number + nativePort: + description: (Number) + type: number + shards: + description: (Block List) (see below + for nested schema) + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + type: object + type: array + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + 
description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + clickhouseTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + altNames: + description: (Block List) (see below for nested schema) + items: + properties: + fromName: + description: (String) + type: string + toName: + description: (String) + type: string + type: object + type: array + cleanupPolicy: + description: (String) + type: string + clickhouseClusterName: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + database: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + httpPort: + description: (Number) + type: number + nativePort: + description: (Number) + type: number + shards: + description: (Block List) (see below + for nested schema) + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + type: object + type: array + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + sharding: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columnValueHash: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + columnName: + description: (String) + type: string + type: object + type: array + customMapping: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + columnName: + description: (String) + type: string + mapping: + description: (Block List) (see below for + nested schema) + items: + properties: + columnValue: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + stringValue: + description: (String) + type: string + type: object + type: array + shardName: + description: (String) + type: string + type: object + type: array + type: object + type: array + roundRobin: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + transferId: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + kafkaSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + auth: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + noAuth: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + sasl: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + mechanism: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + clusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + brokerUrls: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + parser: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + auditTrailsV1Parser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + cloudLoggingParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + jsonParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + tskvParser: + description: '(Block List, Max: 1) (see below + for nested 
schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + topicName: + description: (String) + type: string + topicNames: + description: (List of String) + items: + type: string + type: array + transformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + bufferFlushInterval: + description: (String) + type: string + bufferSize: + description: (String) + type: string + cloudFunction: + description: (String) + type: string + invocationTimeout: + description: (String) + type: string + numberOfRetries: + description: (Number) + type: number + serviceAccountId: + description: (String) + type: string + type: object + type: array + type: object + type: array + kafkaTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + auth: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + noAuth: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + sasl: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + mechanism: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + clusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + brokerUrls: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serializer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + serializerAuto: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + serializerDebezium: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + serializerParameters: + description: (Block List) (see below for + nested schema) + items: + properties: + key: + description: (String) + type: string + value: + description: (String) + type: string + type: object + type: array + type: object + type: array + serializerJson: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + topicSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + topic: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + saveTxOrder: + description: (Boolean) + type: boolean + topicName: + description: (String) + type: string + type: object + type: array + topicPrefix: + description: (String) + type: string + type: object + type: array + type: object + type: array + metrikaSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + counterIds: + description: (List of Number) + items: + type: number + type: array + streams: + description: (Block List) (see below for nested schema) + items: + properties: + columns: + description: (List of String) + items: + type: string + type: array + type: + description: (String) + type: string + type: object + type: array + token: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret.
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: array + mongoSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + collections: + description: (Block List) (see below for nested schema) + items: + properties: + collectionName: + description: (String) + type: string + databaseName: + description: (String) + type: string + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + authSource: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MongodbCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MongodbCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + replicaSet: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + excludedCollections: + description: (Block List) (see below for nested schema) + items: + properties: + collectionName: + description: (String) + type: string + databaseName: + description: (String) + type: string + type: object + type: array + secondaryPreferredMode: + description: (Boolean) + type: boolean + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + mongoTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + authSource: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MongodbCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MongodbCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + replicaSet: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + mysqlSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MySQLCluster in + mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MySQLCluster in + mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a MySQLDatabase in mdb to + populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a MySQLDatabase in mdb to + populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludeTablesRegex: + description: (List of String) + items: + type: string + type: array + includeTablesRegex: + description: (List of String) + items: + type: string + type: array + objectTransferSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + routine: + description: (String) + type: string + tables: + description: (String) + type: string + trigger: + description: (String) + type: string + view: + description: (String) + type: string + type: object + type: array + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceDatabase: + description: (String) + type: string + timezone: + description: (String) + type: string + user: + description: (String) + type: string + userRef: + description: Reference to a MySQLUser in mdb to populate + user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a MySQLUser in mdb to populate + user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + mysqlTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MySQLCluster in + mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MySQLCluster in + mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a MySQLDatabase in mdb to + populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a MySQLDatabase in mdb to + populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceDatabase: + description: (String) + type: string + skipConstraintChecks: + description: (Boolean) + type: boolean + sqlMode: + description: (String) + type: string + timezone: + description: (String) + type: string + user: + description: (String) + type: string + userRef: + description: Reference to a MySQLUser in mdb to populate + user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a MySQLUser in mdb to populate + user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + postgresSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a PostgresqlDatabase in + mdb to populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a PostgresqlDatabase in + mdb to populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + objectTransferSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + cast: + description: (String) + type: string + collation: + description: (String) + type: string + constraint: + description: (String) + type: string + defaultValues: + description: (String) + type: string + fkConstraint: + description: (String) + type: string + function: + description: (String) + type: string + index: + description: (String) + type: string + materializedView: + description: (String) + type: string + policy: + description: (String) + type: string + primaryKey: + description: (String) + type: string + rule: + description: (String) + type: string + sequence: + description: (String) + type: string + sequenceOwnedBy: + description: (String) + type: string + sequenceSet: + description: (String) + type: string + table: + description: (String) + type: string + trigger: + description: (String) + type: string + type: + description: (String) + type: string + view: + description: (String) + type: string + type: object + type: array + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceSchema: + description: (String) + type: string + slotGigabyteLagLimit: + description: (Number) + type: number + user: + description: (String) + type: string + userRef: + description: Reference to a PostgresqlUser in mdb + to populate user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a PostgresqlUser in mdb + to populate user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + postgresTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a PostgresqlDatabase in + mdb to populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a PostgresqlDatabase in + mdb to populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: (String) + type: string + userRef: + description: Reference to a PostgresqlUser in mdb + to populate user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a PostgresqlUser in mdb + to populate user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ydbSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + changefeedCustomName: + description: (String) + type: string + database: + description: (String) + type: string + instance: + description: (String) + type: string + paths: + description: (List of String) + items: + type: string + type: array + saKeyContentSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + ydbTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + database: + description: (String) + type: string + defaultCompression: + description: (String) + type: string + instance: + description: (String) + type: string + isTableColumnOriented: + description: (Boolean) + type: boolean + path: + description: (String) + type: string + saKeyContentSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + ydsSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + allowTtlRewind: + description: (Boolean) + type: boolean + consumer: + description: (String) + type: string + database: + description: (String) + type: string + endpoint: + description: (String) + type: string + parser: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + auditTrailsV1Parser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + cloudLoggingParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + jsonParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + tskvParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + stream: + description: (String) + type: string + subnetId: + description: (String) + type: string + supportedCodecs: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ydsTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + database: + description: (String) + 
type: string + endpoint: + description: (String) + type: string + saveTxOrder: + description: (Boolean) + type: boolean + securityGroups: + description: (List of String) + items: + type: string + type: array + serializer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + serializerAuto: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + serializerDebezium: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + serializerParameters: + description: (Block List) (see below for + nested schema) + items: + properties: + key: + description: (String) + type: string + value: + description: (String) + type: string + type: object + type: array + type: object + type: array + serializerJson: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + serviceAccountId: + description: (String) + type: string + stream: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + description: + description: (String) + type: string + folderId: + description: (String) + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: (Map of String) + type: object + x-kubernetes-map-type: granular + name: + description: (String) + type: string + settings: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + clickhouseSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + clickhouseClusterName: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + database: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + httpPort: + description: (Number) + type: number + nativePort: + description: (Number) + type: number + shards: + description: (Block List) (see below + for nested schema) + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + type: object + type: array + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + clickhouseTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + altNames: + description: (Block List) (see below for nested schema) + items: + properties: + fromName: + description: (String) + type: string + toName: + description: (String) + type: string + type: object + type: array + cleanupPolicy: + description: (String) + type: string + clickhouseClusterName: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + database: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + httpPort: + description: (Number) + type: number + nativePort: + description: (Number) + type: number + shards: + description: (Block List) (see below + for nested schema) + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + type: object + type: array + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + sharding: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columnValueHash: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + columnName: + description: (String) + type: string + type: object + type: array + customMapping: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + columnName: + description: (String) + type: string + mapping: + description: (Block List) (see below for + nested schema) + items: + properties: + columnValue: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + stringValue: + description: (String) + type: string + type: object + type: array + shardName: + description: (String) + type: string + type: object + type: array + type: object + type: array + roundRobin: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + transferId: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + kafkaSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + auth: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + noAuth: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + sasl: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + mechanism: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + clusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + brokerUrls: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + parser: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + auditTrailsV1Parser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + cloudLoggingParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + jsonParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + tskvParser: + description: '(Block List, Max: 1) (see below + for nested 
schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + topicName: + description: (String) + type: string + topicNames: + description: (List of String) + items: + type: string + type: array + transformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + bufferFlushInterval: + description: (String) + type: string + bufferSize: + description: (String) + type: string + cloudFunction: + description: (String) + type: string + invocationTimeout: + description: (String) + type: string + numberOfRetries: + description: (Number) + type: number + serviceAccountId: + description: (String) + type: string + type: object + type: array + type: object + type: array + kafkaTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + auth: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + noAuth: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + sasl: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + mechanism: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + clusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + brokerUrls: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serializer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + serializerAuto: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + serializerDebezium: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + serializerParameters: + description: (Block List) (see below for + nested schema) + items: + properties: + key: + description: (String) + type: string + value: + description: (String) + type: string + type: object + type: array + type: object + type: array + serializerJson: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + topicSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + topic: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + saveTxOrder: + description: (Boolean) + type: boolean + topicName: + description: (String) + type: string + type: object + type: array + topicPrefix: + description: (String) + type: string + type: object + type: array + type: object + type: array + metrikaSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + counterIds: + description: (List of Number) + items: + type: number + type: array + streams: + description: (Block List) (see below for nested schema) + items: + properties: + columns: + description: (List of String) + items: + type: string + type: array + type: + description: (String) + type: string + type: object + type: array + token: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret.
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: array + mongoSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + collections: + description: (Block List) (see below for nested schema) + items: + properties: + collectionName: + description: (String) + type: string + databaseName: + description: (String) + type: string + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + authSource: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MongodbCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MongodbCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + replicaSet: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + excludedCollections: + description: (Block List) (see below for nested schema) + items: + properties: + collectionName: + description: (String) + type: string + databaseName: + description: (String) + type: string + type: object + type: array + secondaryPreferredMode: + description: (Boolean) + type: boolean + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + mongoTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + authSource: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MongodbCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MongodbCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + replicaSet: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + mysqlSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MySQLCluster in + mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MySQLCluster in + mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a MySQLDatabase in mdb to + populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a MySQLDatabase in mdb to + populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludeTablesRegex: + description: (List of String) + items: + type: string + type: array + includeTablesRegex: + description: (List of String) + items: + type: string + type: array + objectTransferSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + routine: + description: (String) + type: string + tables: + description: (String) + type: string + trigger: + description: (String) + type: string + view: + description: (String) + type: string + type: object + type: array + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceDatabase: + description: (String) + type: string + timezone: + description: (String) + type: string + user: + description: (String) + type: string + userRef: + description: Reference to a MySQLUser in mdb to populate + user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a MySQLUser in mdb to populate + user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + mysqlTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a MySQLCluster in + mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. 
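Note on the mysqlSource block that closes above: every cloud-side identifier (mdbClusterId, database, user, subnetId, securityGroups) comes in three flavors — the plain value, a *Ref that resolves a named managed object, and a *Selector that resolves by labels or controller reference, governed by the Required/Optional and Always/IfNotPresent policies shown. A minimal sketch of a source Endpoint manifest against this schema; the API group is assumed from the provider's upjet conventions, and all referenced object names are hypothetical:

    apiVersion: datatransfer.yandex-cloud.upjet.crossplane.io/v1alpha1  # group assumed
    kind: Endpoint
    metadata:
      name: orders-mysql-source
    spec:
      forProvider:
        name: orders-mysql-source
        settings:
          - mysqlSource:
              - connection:
                  - mdbClusterIdRef:
                      name: orders-mysql           # hypothetical MySQLCluster
                databaseRef:
                  name: orders-db                  # hypothetical MySQLDatabase
                userRef:
                  name: transfer-user              # hypothetical MySQLUser
                password:
                  - rawSecretRef:
                      key: password
                      name: transfer-user-creds
                      namespace: crossplane-system
                includeTablesRegex:
                  - "^orders\\..*"

With resolution: Required (the default), the Endpoint will not become ready until the referenced MySQLCluster, MySQLDatabase, and MySQLUser objects can be resolved.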
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a MySQLCluster in + mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a MySQLDatabase in mdb to + populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a MySQLDatabase in mdb to + populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceDatabase: + description: (String) + type: string + skipConstraintChecks: + description: (Boolean) + type: boolean + sqlMode: + description: (String) + type: string + timezone: + description: (String) + type: string + user: + description: (String) + type: string + userRef: + description: Reference to a MySQLUser in mdb to populate + user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a MySQLUser in mdb to populate + user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + postgresSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
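The mysqlTarget block that closes above reuses the same connection shape and adds write-side knobs: cleanupPolicy, skipConstraintChecks, sqlMode, and timezone. Only the settings fragment differs from the source sketch earlier; the cleanupPolicy value is assumed, since the schema types it only as (String):

        settings:
          - mysqlTarget:
              - connection:
                  - mdbClusterIdRef:
                      name: analytics-mysql        # hypothetical MySQLCluster
                databaseRef:
                  name: analytics-db               # hypothetical MySQLDatabase
                userRef:
                  name: transfer-user              # hypothetical MySQLUser
                password:
                  - rawSecretRef:
                      key: password
                      name: transfer-user-creds
                      namespace: crossplane-system
                cleanupPolicy: DROP                # value assumed, not enumerated by the schema
                skipConstraintChecks: true
                timezone: UTC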
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a PostgresqlDatabase in + mdb to populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a PostgresqlDatabase in + mdb to populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + objectTransferSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + cast: + description: (String) + type: string + collation: + description: (String) + type: string + constraint: + description: (String) + type: string + defaultValues: + description: (String) + type: string + fkConstraint: + description: (String) + type: string + function: + description: (String) + type: string + index: + description: (String) + type: string + materializedView: + description: (String) + type: string + policy: + description: (String) + type: string + primaryKey: + description: (String) + type: string + rule: + description: (String) + type: string + sequence: + description: (String) + type: string + sequenceOwnedBy: + description: (String) + type: string + sequenceSet: + description: (String) + type: string + table: + description: (String) + type: string + trigger: + description: (String) + type: string + type: + description: (String) + type: string + view: + description: (String) + type: string + type: object + type: array + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceSchema: + description: (String) + type: string + slotGigabyteLagLimit: + description: (Number) + type: number + user: + description: (String) + type: string + userRef: + description: Reference to a PostgresqlUser in mdb + to populate user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a PostgresqlUser in mdb + to populate user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + postgresTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + mdbClusterIdRef: + description: Reference to a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
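postgresSource, completed above, adds per-object transfer toggles (objectTransferSettings), replication-slot protection (slotGigabyteLagLimit), and plain includeTables/excludeTables lists in place of the regex variants used for MySQL. A sketch of an on-premise source, assuming hypothetical hosts, a placeholder PEM bundle for caCertificate, and assumed stage values for the transfer toggles:

        settings:
          - postgresSource:
              - connection:
                  - onPremise:
                      - hosts:
                          - pg-1.example.internal  # hypothetical
                          - pg-2.example.internal
                        port: 6432
                        subnetIdRef:
                          name: transfer-subnet    # hypothetical Subnet object
                        tlsMode:
                          - enabled:
                              - caCertificate: "<PEM CA bundle>"  # placeholder
                database: app
                user: replication_user
                password:
                  - rawSecretRef:
                      key: password
                      name: pg-creds
                      namespace: crossplane-system
                includeTables:
                  - public.orders
                slotGigabyteLagLimit: 10
                objectTransferSettings:
                  - sequence: BEFORE_DATA          # stage values assumed
                    view: AFTER_DATA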
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mdbClusterIdSelector: + description: Selector for a PostgresqlCluster + in mdb to populate mdbClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + subnetIdRef: + description: Reference to a Subnet in + vpc to populate subnetId. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in + vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + databaseRef: + description: Reference to a PostgresqlDatabase in + mdb to populate database. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseSelector: + description: Selector for a PostgresqlDatabase in + mdb to populate database. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + securityGroupsRefs: + description: References to SecurityGroup in vpc to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: (String) + type: string + userRef: + description: Reference to a PostgresqlUser in mdb + to populate user. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSelector: + description: Selector for a PostgresqlUser in mdb + to populate user. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ydbSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + changefeedCustomName: + description: (String) + type: string + database: + description: (String) + type: string + instance: + description: (String) + type: string + paths: + description: (List of String) + items: + type: string + type: array + saKeyContentSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + ydbTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + database: + description: (String) + type: string + defaultCompression: + description: (String) + type: string + instance: + description: (String) + type: string + isTableColumnOriented: + description: (Boolean) + type: boolean + path: + description: (String) + type: string + saKeyContentSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
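Where the managed-database blocks above lean on *Ref/*Selector resolution, the ydbSource block that closes above authenticates with a service-account key supplied through saKeyContentSecretRef — a plain Kubernetes secret reference whose key, name, and namespace are all required. A sketch with hypothetical names:

        settings:
          - ydbSource:
              - database: my-ydb-database          # hypothetical database path
                paths:
                  - orders/events                  # hypothetical table path
                saKeyContentSecretRef:
                  key: sa-key.json
                  name: ydb-sa-key
                  namespace: crossplane-system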
+ type: string + required: + - key + - name + - namespace + type: object + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + ydsSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + allowTtlRewind: + description: (Boolean) + type: boolean + consumer: + description: (String) + type: string + database: + description: (String) + type: string + endpoint: + description: (String) + type: string + parser: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + auditTrailsV1Parser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + cloudLoggingParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + jsonParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + tskvParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + stream: + description: (String) + type: string + subnetId: + description: (String) + type: string + supportedCodecs: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ydsTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + database: + description: (String) + 
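The ydsSource parser schema above nests two levels of fields: the outer dataSchema list wraps a fields item, which in turn holds the per-column field descriptors (key, name, path, required, type). A hedged sketch of a JSON-parsed stream source; the field type names are assumed, as the schema does not enumerate them:

        settings:
          - ydsSource:
              - database: my-ydb-database          # hypothetical
                stream: input-events               # hypothetical
                consumer: transfer-consumer        # hypothetical
                parser:
                  - jsonParser:
                      - addRestColumn: true
                        dataSchema:
                          - fields:
                              - fields:
                                  - key: true
                                    name: event_id
                                    required: true
                                    type: string   # type values assumed
                        nullKeysAllowed: false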
type: string + endpoint: + description: (String) + type: string + saveTxOrder: + description: (Boolean) + type: boolean + securityGroups: + description: (List of String) + items: + type: string + type: array + serializer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + serializerAuto: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + serializerDebezium: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + serializerParameters: + description: (Block List) (see below for + nested schema) + items: + properties: + key: + description: (Boolean) + type: string + value: + description: (String) + type: string + type: object + type: array + type: object + type: array + serializerJson: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + serviceAccountId: + description: (String) + type: string + stream: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
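managementPolicies, described above, defaults to '*' (full lifecycle). A common use of the enum is observe-only import of an endpoint that already exists in the cloud — with only Observe set, Crossplane watches the external resource but never creates, updates, or deletes it:

    spec:
      managementPolicies:
        - Observe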
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              publishConnectionDetailsTo:
+                description: |-
+                  PublishConnectionDetailsTo specifies the connection secret config which
+                  contains a name, metadata and a reference to secret store config to
+                  which any connection details for this managed resource should be written.
+                  Connection details frequently include the endpoint, username,
+                  and password required to connect to the managed resource.
+                properties:
+                  configRef:
+                    default:
+                      name: default
+                    description: |-
+                      SecretStoreConfigRef specifies which secret store config should be used
+                      for this ConnectionSecret.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  metadata:
+                    description: Metadata is the metadata for connection secret.
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Annotations are the annotations to be added to connection secret.
+                          - For Kubernetes secrets, this will be used as "metadata.annotations".
+                          - It is up to Secret Store implementation for others store types.
+                        type: object
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Labels are the labels/tags to be added to connection secret.
+                          - For Kubernetes secrets, this will be used as "metadata.labels".
+                          - It is up to Secret Store implementation for others store types.
+                        type: object
+                      type:
+                        description: |-
+                          Type is the SecretType for the connection secret.
+                          - Only valid for Kubernetes Secret Stores.
+                        type: string
+                    type: object
+                  name:
+                    description: Name is the name of the connection secret.
+                    type: string
+                required:
+                - name
+                type: object
+              writeConnectionSecretToRef:
+                description: |-
+                  WriteConnectionSecretToReference specifies the namespace and name of a
+                  Secret to which any connection details for this managed resource should
+                  be written. Connection details frequently include the endpoint, username,
+                  and password required to connect to the managed resource.
+                  This field is planned to be replaced in a future release in favor of
+                  PublishConnectionDetailsTo. Currently, both could be set independently
+                  and connection details would be published to both without affecting
+                  each other.
+                properties:
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - name
+                - namespace
+                type: object
+            required:
+            - forProvider
+            type: object
+          status:
+            description: EndpointStatus defines the observed state of Endpoint.
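Before the status schema that begins above, the spec closes with the two connection-secret sinks; per the descriptions, both can be set side by side while writeConnectionSecretToRef is being phased out in favor of publishConnectionDetailsTo. A minimal sketch (secret name hypothetical):

    spec:
      providerConfigRef:
        name: default
      writeConnectionSecretToRef:
        name: endpoint-conn                      # hypothetical secret name
        namespace: crossplane-system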
+ properties: + atProvider: + properties: + description: + description: (String) + type: string + folderId: + description: (String) + type: string + id: + description: (String) The ID of this resource. + type: string + labels: + additionalProperties: + type: string + description: (Map of String) + type: object + x-kubernetes-map-type: granular + name: + description: (String) + type: string + settings: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + clickhouseSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + clickhouseClusterName: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + database: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + httpPort: + description: (Number) + type: number + nativePort: + description: (Number) + type: number + shards: + description: (Block List) (see below + for nested schema) + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + type: object + type: array + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + type: object + type: array + clickhouseTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + altNames: + description: (Block List) (see below for nested schema) + items: + properties: + fromName: + description: (String) + type: string + toName: + description: (String) + type: string + type: object + type: array + cleanupPolicy: + description: (String) + type: string + clickhouseClusterName: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + database: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + httpPort: + description: (Number) + type: number + nativePort: + description: (Number) + type: number + shards: + description: (Block List) (see below + for nested schema) + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + type: object + type: array + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + sharding: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columnValueHash: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + columnName: + description: (String) + type: string + type: object + type: array + customMapping: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + columnName: + description: (String) + type: string + mapping: + description: (Block List) (see below for + nested schema) + items: + properties: + columnValue: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + stringValue: + description: (String) + type: string + type: object + type: array + shardName: + description: (String) + type: string + type: object + type: array + type: object + type: array + roundRobin: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + transferId: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + subnetId: + description: (String) + type: string + type: object + type: array + kafkaSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + auth: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + noAuth: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + sasl: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + mechanism: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + clusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + brokerUrls: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + parser: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + auditTrailsV1Parser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + cloudLoggingParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + jsonParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + tskvParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + type: object + type: array + securityGroups: + 
description: (List of String) + items: + type: string + type: array + topicName: + description: (String) + type: string + topicNames: + description: (List of String) + items: + type: string + type: array + transformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + bufferFlushInterval: + description: (String) + type: string + bufferSize: + description: (String) + type: string + cloudFunction: + description: (String) + type: string + invocationTimeout: + description: (String) + type: string + numberOfRetries: + description: (Number) + type: number + serviceAccountId: + description: (String) + type: string + type: object + type: array + type: object + type: array + kafkaTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + auth: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + noAuth: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + sasl: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + mechanism: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + clusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + brokerUrls: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serializer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + serializerAuto: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + serializerDebezium: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + serializerParameters: + description: (Block List) (see below for + nested schema) + items: + properties: + key: + description: (String) + type: string + value: + description: (String) + type: string + type: object + type: array + type: object + type: array + serializerJson: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + topicSettings: + description: '(Block List, Max: 1) (see
below for + nested schema)' + items: + properties: + topic: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + saveTxOrder: + description: (Boolean) + type: boolean + topicName: + description: (String) + type: string + type: object + type: array + topicPrefix: + description: (String) + type: string + type: object + type: array + type: object + type: array + metrikaSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + counterIds: + description: (List of Number) + items: + type: number + type: array + streams: + description: (Block List) (see below for nested schema) + items: + properties: + columns: + description: (List of String) + items: + type: string + type: array + type: + description: (String) + type: string + type: object + type: array + token: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: array + mongoSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + collections: + description: (Block List) (see below for nested schema) + items: + properties: + collectionName: + description: (String) + type: string + databaseName: + description: (String) + type: string + type: object + type: array + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + authSource: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + replicaSet: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + excludedCollections: + description: (Block List) (see below for nested schema) + items: + properties: + collectionName: + description: (String) + type: string + databaseName: + description: (String) + type: string + type: object + type: array + secondaryPreferredMode: + description: (Boolean) + type: boolean + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + type: object + type: array + mongoTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + connectionOptions: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + authSource: + description: (String) + type: string + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + replicaSet: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + disabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + type: object + type: array + enabled: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + password: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the + secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + user: + description: (String) + type: string + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + securityGroups: + description: (List of String) + items: + type: string + type: array + subnetId: + description: (String) + type: string + type: object + type: array + mysqlSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + excludeTablesRegex: + description: (List of String) + items: + type: string + type: array + includeTablesRegex: + description: (List of String) + items: + type: string + type: array + objectTransferSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + routine: + description: (String) + type: string + tables: + description: (String) + type: string + trigger: + description: (String) + type: string + view: + description: (String) + type: string + type: object + type: array + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceDatabase: + description: (String) + type: string + timezone: + description: (String) + type: string + user: + description: (String) + type: string + type: object + type: array + mysqlTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceDatabase: + description: (String) + type: string + skipConstraintChecks: + description: (Boolean) + type: boolean + sqlMode: + description: (String) + type: string + timezone: + description: (String) + type: string + user: + description: (String) + type: string + type: object + type: array + postgresSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + objectTransferSettings: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + cast: + description: (String) + type: string + collation: + description: (String) + type: string + constraint: + description: (String) + type: string + defaultValues: + description: (String) + type: string + fkConstraint: + description: (String) + type: string + function: + description: (String) + type: string + index: + description: (String) + type: string + materializedView: + description: (String) + type: string + policy: + description: (String) + type: string + primaryKey: + description: (String) + type: string + rule: + description: (String) + type: string + sequence: + description: (String) + type: string + sequenceOwnedBy: + description: (String) + type: string + sequenceSet: + description: (String) + type: string + table: + description: (String) + type: string + trigger: + description: (String) + type: string + type: + description: (String) + type: string + view: + description: (String) + type: string + type: object + type: array + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceSchema: + description: (String) + type: string + slotGigabyteLagLimit: + description: (Number) + type: number + user: + description: (String) + type: string + type: object + type: array + postgresTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + connection: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + mdbClusterId: + description: (String) + type: string + onPremise: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + hosts: + description: (List of String) + items: + type: string + type: array + port: + description: (Number) + type: number + subnetId: + description: (String) + type: string + tlsMode: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + disabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + type: object + type: array + enabled: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + caCertificate: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + database: + description: (String) + type: string + password: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + rawSecretRef: + description: (String, Sensitive) + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + user: + description: (String) + type: string + type: object + type: array + ydbSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + changefeedCustomName: + description: (String) + type: string + database: + description: (String) + type: string + instance: + description: (String) + type: string + paths: + description: (List of String) + items: + type: string + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + ydbTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + cleanupPolicy: + description: (String) + type: string + database: + description: (String) + type: string + defaultCompression: + description: (String) + type: string + instance: + description: (String) + type: string + isTableColumnOriented: + description: (Boolean) + type: boolean + path: + description: (String) + type: string + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + ydsSource: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + allowTtlRewind: + description: (Boolean) + type: boolean + consumer: + description: (String) + type: string + database: + description: (String) + type: string + endpoint: + description: (String) + type: string + parser: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + auditTrailsV1Parser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + cloudLoggingParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + jsonParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + tskvParser: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + addRestColumn: + description: (Boolean) + type: boolean + dataSchema: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + fields: + description: '(Block List, Max: + 1) (see below for nested 
schema)' + items: + properties: + fields: + description: '(Block List, + Max: 1) (see below for nested + schema)' + items: + properties: + key: + description: (Boolean) + type: boolean + name: + description: (String) + type: string + path: + description: (String) + type: string + required: + description: (Boolean) + type: boolean + type: + description: (String) + type: string + type: object + type: array + type: object + type: array + jsonFields: + description: (String) + type: string + type: object + type: array + nullKeysAllowed: + description: (Boolean) + type: boolean + unescapeStringValues: + description: (Boolean) + type: boolean + type: object + type: array + type: object + type: array + securityGroups: + description: (List of String) + items: + type: string + type: array + serviceAccountId: + description: (String) + type: string + stream: + description: (String) + type: string + subnetId: + description: (String) + type: string + supportedCodecs: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ydsTarget: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + database: + description: (String) + type: string + endpoint: + description: (String) + type: string + saveTxOrder: + description: (Boolean) + type: boolean + securityGroups: + description: (List of String) + items: + type: string + type: array + serializer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + serializerAuto: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + serializerDebezium: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + serializerParameters: + description: (Block List) (see below for + nested schema) + items: + properties: + key: + description: (String) + type: string + value: + description: (String) + type: string + type: object + type: array + type: object + type: array + serializerJson: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: object + type: array + type: object + type: array + serviceAccountId: + description: (String) + type: string + stream: + description: (String) + type: string + subnetId: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition.
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/datatransfer.yandex-cloud.upjet.crossplane.io_transfers.yaml b/package/crds/datatransfer.yandex-cloud.upjet.crossplane.io_transfers.yaml new file mode 100644 index 0000000..6a61d22 --- /dev/null +++ b/package/crds/datatransfer.yandex-cloud.upjet.crossplane.io_transfers.yaml @@ -0,0 +1,1887 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: transfers.datatransfer.yandex-cloud.upjet.crossplane.io +spec: + group: datatransfer.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Transfer + listKind: TransferList + plural: transfers + singular: transfer + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Transfer is the Schema for the Transfers API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TransferSpec defines the desired state of Transfer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + resource when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: (String) + type: string + folderId: + description: (String) + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: (Map of String) + type: object + x-kubernetes-map-type: granular + name: + description: (String) + type: string + onCreateActivateMode: + description: (String) + type: string + runtime: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + ycRuntime: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + jobCount: + description: (Number) + type: number + uploadShardParams: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + jobCount: + description: (Number) + type: number + processCount: + description: (Number) + type: number + type: object + type: array + type: object + type: array + type: object + type: array + sourceId: + description: (String) + type: string + sourceIdRef: + description: Reference to an Endpoint to populate sourceId. + properties: + name: + description: Name of the referenced object.
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceIdSelector: + description: Selector for an Endpoint to populate sourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetId: + description: (String) + type: string + targetIdRef: + description: Reference to an Endpoint to populate targetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetIdSelector: + description: Selector for an Endpoint to populate targetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transformation: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + transformers: + description: (Block List) (see below for nested schema) + items: + properties: + convertToString: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + filterColumns: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + filterRows: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + filter: + description: (String) + type: string + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + maskField: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: string + type: array + function: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + maskFunctionHash: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + userDefinedSalt: + description: (String) + type: string + type: object + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + 
for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + renameTables: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + renameTables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + newName: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + name: + description: (String) + type: string + nameSpace: + description: (String) + type: string + type: object + type: array + originalName: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + name: + description: (String) + type: string + nameSpace: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + replacePrimaryKey: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + keys: + description: (List of String) + items: + type: string + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + sharderTransformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + shardsCount: + description: (Number) + type: number + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + tableSplitterTransformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: string + type: array + splitter: + description: (String) + type: string + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: + description: (String) + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + description: + description: (String) + type: string + folderId: + description: (String) + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: (Map of String) + type: object + x-kubernetes-map-type: granular + name: + description: (String) + type: string + onCreateActivateMode: + description: (String) + type: string + runtime: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + ycRuntime: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + jobCount: + description: (Number) + type: number + uploadShardParams: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + jobCount: + description: (Number) + type: number + processCount: + description: (Number) + type: number + type: object + type: array + type: object + type: array + type: object + type: array + sourceId: + description: (String) + type: string + sourceIdRef: + description: Reference to an Endpoint to populate sourceId.
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceIdSelector: + description: Selector for an Endpoint to populate sourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetId: + description: (String) + type: string + targetIdRef: + description: Reference to an Endpoint to populate targetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetIdSelector: + description: Selector for an Endpoint to populate targetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transformation: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + transformers: + description: (Block List) (see below for nested schema) + items: + properties: + convertToString: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + filterColumns: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + filterRows: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + filter: + description: (String) + type: string + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + maskField: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: string + type: array + function: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + maskFunctionHash: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + userDefinedSalt: + description: (String) + type: string + type: object + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + 
for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + renameTables: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + renameTables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + newName: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + name: + description: (String) + type: string + nameSpace: + description: (String) + type: string + type: object + type: array + originalName: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + name: + description: (String) + type: string + nameSpace: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + replacePrimaryKey: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + keys: + description: (List of String) + items: + type: string + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + sharderTransformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + shardsCount: + description: (Number) + type: number + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + tableSplitterTransformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: string + type: array + splitter: + description: (String) + type: string + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: + description: (String) + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TransferStatus defines the observed state of Transfer. + properties: + atProvider: + properties: + description: + description: (String) + type: string + folderId: + description: (String) + type: string + id: + description: (String) The ID of this resource. 
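+            # For upjet-generated resources this observed id is typically the same
+            # value surfaced in the crossplane.io/external-name annotation (the
+            # EXTERNAL-NAME printer column on these CRDs).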
+ type: string + labels: + additionalProperties: + type: string + description: (Map of String) + type: object + x-kubernetes-map-type: granular + name: + description: (String) + type: string + onCreateActivateMode: + description: (String) + type: string + runtime: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + ycRuntime: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + jobCount: + description: (Number) + type: number + uploadShardParams: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + jobCount: + description: (Number) + type: number + processCount: + description: (Number) + type: number + type: object + type: array + type: object + type: array + type: object + type: array + sourceId: + description: (String) + type: string + targetId: + description: (String) + type: string + transformation: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + transformers: + description: (Block List) (see below for nested schema) + items: + properties: + convertToString: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + filterColumns: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + filterRows: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + filter: + description: (String) + type: string + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + maskField: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: string + type: array + function: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + maskFunctionHash: + description: '(Block List, Max: 1) (see + below for nested schema)' + 
items: + properties: + userDefinedSalt: + description: (String) + type: string + type: object + type: array + type: object + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + renameTables: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + renameTables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + newName: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + name: + description: (String) + type: string + nameSpace: + description: (String) + type: string + type: object + type: array + originalName: + description: '(Block List, Max: 1) (see + below for nested schema)' + items: + properties: + name: + description: (String) + type: string + nameSpace: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + replacePrimaryKey: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + keys: + description: (List of String) + items: + type: string + type: array + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + sharderTransformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeColumns: + description: (List of String) + items: + type: string + type: array + includeColumns: + description: (List of String) + items: + type: string + type: array + type: object + type: array + shardsCount: + description: (Number) + type: number + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + tableSplitterTransformer: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + columns: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + type: string + type: array + splitter: + description: (String) + type: string + tables: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + excludeTables: + description: (List of String) + items: + type: string + type: array + includeTables: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: + description: (String) + type: string + warning: + description: (String) + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
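+            # A populated condition typically looks like (illustrative values):
+            #   type: Ready
+            #   status: "True"
+            #   reason: Available
+            #   lastTransitionTime: "2024-01-01T00:00:00Z"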
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dns.yandex-cloud.upjet.crossplane.io_recordsets.yaml b/package/crds/dns.yandex-cloud.upjet.crossplane.io_recordsets.yaml new file mode 100644 index 0000000..281fe8a --- /dev/null +++ b/package/crds/dns.yandex-cloud.upjet.crossplane.io_recordsets.yaml @@ -0,0 +1,548 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: recordsets.dns.yandex-cloud.upjet.crossplane.io +spec: + group: dns.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Recordset + listKind: RecordsetList + plural: recordsets + singular: recordset + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Recordset is the Schema for the Recordsets API. Manages a DNS + Recordset within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RecordsetSpec defines the desired state of Recordset + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + data: + description: The string data for the records in this record set. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The DNS name this record set will apply to. + type: string + ttl: + description: The time-to-live of this record set (seconds). + type: number + type: + description: The DNS record set type. + type: string + zoneId: + description: The id of the zone in which this record set will + reside. + type: string + zoneIdRef: + description: Reference to a Zone to populate zoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneIdSelector: + description: Selector for a Zone to populate zoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + data: + description: The string data for the records in this record set. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The DNS name this record set will apply to. + type: string + ttl: + description: The time-to-live of this record set (seconds). + type: number + type: + description: The DNS record set type. + type: string + zoneId: + description: The id of the zone in which this record set will + reside. + type: string + zoneIdRef: + description: Reference to a Zone to populate zoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneIdSelector: + description: Selector for a Zone to populate zoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.data is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.data) + || (has(self.initProvider) && has(self.initProvider.data))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.ttl is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ttl) + || (has(self.initProvider) && has(self.initProvider.ttl))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: RecordsetStatus defines the observed state of Recordset. + properties: + atProvider: + properties: + data: + description: The string data for the records in this record set. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + type: string + name: + description: The DNS name this record set will apply to. + type: string + ttl: + description: The time-to-live of this record set (seconds). + type: number + type: + description: The DNS record set type. 
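+            # Illustrative sketch, not generated output (the names and values are
+            # hypothetical): the CEL rules above require data, name, ttl and type
+            # whenever Create/Update is allowed, while zoneId can be resolved from
+            # a managed Zone via zoneIdRef, e.g.
+            #   spec:
+            #     forProvider:
+            #       name: www.example.com.
+            #       type: A
+            #       ttl: 300
+            #       data: ["192.0.2.10"]
+            #       zoneIdRef:
+            #         name: example-zone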
+ type: string + zoneId: + description: The id of the zone in which this record set will + reside. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dns.yandex-cloud.upjet.crossplane.io_zoneiambindings.yaml b/package/crds/dns.yandex-cloud.upjet.crossplane.io_zoneiambindings.yaml new file mode 100644 index 0000000..6c2fa68 --- /dev/null +++ b/package/crds/dns.yandex-cloud.upjet.crossplane.io_zoneiambindings.yaml @@ -0,0 +1,528 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: zoneiambindings.dns.yandex-cloud.upjet.crossplane.io +spec: + group: dns.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ZoneIAMBinding + listKind: ZoneIAMBindingList + plural: zoneiambindings + singular: zoneiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ZoneIAMBinding is the Schema for the ZoneIAMBindings API. Allows + management of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ZoneIAMBindingSpec defines the desired state of ZoneIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dnsZoneId: + description: The DNS Zone ID to apply a binding to. + type: string + dnsZoneIdRef: + description: Reference to a Zone in dns to populate dnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dnsZoneIdSelector: + description: Selector for a Zone in dns to populate dnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dnsZoneId: + description: The DNS Zone ID to apply a binding to. + type: string + dnsZoneIdRef: + description: Reference to a Zone in dns to populate dnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dnsZoneIdSelector: + description: Selector for a Zone in dns to populate dnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. 
Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ZoneIAMBindingStatus defines the observed state of ZoneIAMBinding. + properties: + atProvider: + properties: + dnsZoneId: + description: The DNS Zone ID to apply a binding to. + type: string + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
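+            # Illustrative sketch, not generated output (the role and member values
+            # are hypothetical): the CEL rules above require role and members, while
+            # the zone can be resolved by reference, e.g.
+            #   spec:
+            #     forProvider:
+            #       role: dns.editor
+            #       members:
+            #         - serviceAccount:<service-account-id>
+            #       dnsZoneIdRef:
+            #         name: example-zone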
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dns.yandex-cloud.upjet.crossplane.io_zones.yaml b/package/crds/dns.yandex-cloud.upjet.crossplane.io_zones.yaml new file mode 100644 index 0000000..9d01bbf --- /dev/null +++ b/package/crds/dns.yandex-cloud.upjet.crossplane.io_zones.yaml @@ -0,0 +1,783 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: zones.dns.yandex-cloud.upjet.crossplane.io +spec: + group: dns.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Zone + listKind: ZoneList + plural: zones + singular: zone + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Zone is the Schema for the Zones API. Manages a DNS Zone within + Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ZoneSpec defines the desired state of Zone + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deletionProtection: + description: |- + (Boolean) Flag that protects the dns zone from accidental deletion. + Flag that protects the dns zone from accidental deletion. + type: boolean + description: + description: |- + (String) Description of the DNS zone. + Description of the DNS zone. + type: string + folderId: + description: |- + (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used. + ID of the folder to create a zone in. If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: |- + (Map of String) A set of key/value label pairs to assign to the DNS zone. + A set of key/value label pairs to assign to the DNS zone. + type: object + x-kubernetes-map-type: granular + name: + description: |- + (String) User assigned name of a specific resource. Must be unique within the folder. + User assigned name of a specific resource. Must be unique within the folder. + type: string + privateNetworks: + description: |- + (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + items: + type: string + type: array + x-kubernetes-list-type: set + privateNetworksRefs: + description: References to Network in vpc to populate privateNetworks. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + privateNetworksSelector: + description: Selector for a list of Network in vpc to populate + privateNetworks. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + public: + description: |- + (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. + The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. 
+                type: boolean
+              zone:
+                description: |-
+                  (String) The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+                  The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+                type: string
+            type: object
+          initProvider:
+            description: |-
+              THIS IS A BETA FIELD. It will be honored
+              unless the Management Policies feature flag is disabled.
+              InitProvider holds the same fields as ForProvider, with the exception
+              of Identifier and other resource reference fields. The fields that are
+              in InitProvider are merged into ForProvider when the resource is created.
+              The same fields are also added to the terraform ignore_changes hook, to
+              avoid updating them after creation. This is useful for fields that are
+              required on creation, but we do not desire to update them after creation,
+              for example because an external controller is managing them, like an
+              autoscaler.
+            properties:
+              deletionProtection:
+                description: |-
+                  (Boolean) Flag that protects the dns zone from accidental deletion.
+                  Flag that protects the dns zone from accidental deletion.
+                type: boolean
+              description:
+                description: |-
+                  (String) Description of the DNS zone.
+                  Description of the DNS zone.
+                type: string
+              folderId:
+                description: |-
+                  (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used.
+                  ID of the folder to create a zone in. If it is not provided, the default provider folder is used.
+                type: string
+              folderIdRef:
+                description: Reference to a Folder in resourcemanager to populate
+                  folderId.
+                properties:
+                  name:
+                    description: Name of the referenced object.
+                    type: string
+                  policy:
+                    description: Policies for referencing.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              folderIdSelector:
+                description: Selector for a Folder in resourcemanager to populate
+                  folderId.
+                properties:
+                  matchControllerRef:
+                    description: |-
+                      MatchControllerRef ensures an object with the same controller reference
+                      as the selecting object is selected.
+                    type: boolean
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: MatchLabels ensures an object with matching labels
+                      is selected.
+                    type: object
+                  policy:
+                    description: Policies for selection.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present.
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: |- + (Map of String) A set of key/value label pairs to assign to the DNS zone. + A set of key/value label pairs to assign to the DNS zone. + type: object + x-kubernetes-map-type: granular + name: + description: |- + (String) User assigned name of a specific resource. Must be unique within the folder. + User assigned name of a specific resource. Must be unique within the folder. + type: string + privateNetworks: + description: |- + (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from. + items: + type: string + type: array + x-kubernetes-list-type: set + privateNetworksRefs: + description: References to Network in vpc to populate privateNetworks. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + privateNetworksSelector: + description: Selector for a list of Network in vpc to populate + privateNetworks. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + public: + description: |- + (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. + The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. 
+                type: boolean
+              zone:
+                description: |-
+                  (String) The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+                  The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+                type: string
+            type: object
+          managementPolicies:
+            default:
+            - '*'
+            description: |-
+              THIS IS A BETA FIELD. It is on by default but can be opted out
+              through a Crossplane feature flag.
+              ManagementPolicies specify the array of actions Crossplane is allowed to
+              take on the managed and external resources.
+              This field is planned to replace the DeletionPolicy field in a future
+              release. Currently, both could be set independently and non-default
+              values would be honored if the feature flag is enabled. If both are
+              custom, the DeletionPolicy field will be ignored.
+              See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+              and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+            items:
+              description: |-
+                A ManagementAction represents an action that the Crossplane controllers
+                can take on an external resource.
+              enum:
+              - Observe
+              - Create
+              - Update
+              - Delete
+              - LateInitialize
+              - '*'
+              type: string
+            type: array
+          providerConfigRef:
+            default:
+              name: default
+            description: |-
+              ProviderConfigReference specifies how the provider that will be used to
+              create, observe, update, and delete this managed resource should be
+              configured.
+            properties:
+              name:
+                description: Name of the referenced object.
+                type: string
+              policy:
+                description: Policies for referencing.
+                properties:
+                  resolution:
+                    default: Required
+                    description: |-
+                      Resolution specifies whether resolution of this reference is required.
+                      The default is 'Required', which means the reconcile will fail if the
+                      reference cannot be resolved. 'Optional' means this reference will be
+                      a no-op if it cannot be resolved.
+                    enum:
+                    - Required
+                    - Optional
+                    type: string
+                  resolve:
+                    description: |-
+                      Resolve specifies when this reference should be resolved. The default
+                      is 'IfNotPresent', which will attempt to resolve the reference only when
+                      the corresponding field is not present. Use 'Always' to resolve the
+                      reference on every reconcile.
+                    enum:
+                    - Always
+                    - IfNotPresent
+                    type: string
+                type: object
+            required:
+            - name
+            type: object
+          publishConnectionDetailsTo:
+            description: |-
+              PublishConnectionDetailsTo specifies the connection secret config which
+              contains a name, metadata and a reference to secret store config to
+              which any connection details for this managed resource should be written.
+              Connection details frequently include the endpoint, username,
+              and password required to connect to the managed resource.
+            properties:
+              configRef:
+                default:
+                  name: default
+                description: |-
+                  SecretStoreConfigRef specifies which secret store config should be used
+                  for this ConnectionSecret.
+                properties:
+                  name:
+                    description: Name of the referenced object.
+                    type: string
+                  policy:
+                    description: Policies for referencing.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              metadata:
+                description: Metadata is the metadata for connection secret.
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Annotations are the annotations to be added to connection secret.
+                      - For Kubernetes secrets, this will be used as "metadata.annotations".
+                      - It is up to the Secret Store implementation for other store types.
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Labels are the labels/tags to be added to connection secret.
+                      - For Kubernetes secrets, this will be used as "metadata.labels".
+                      - It is up to the Secret Store implementation for other store types.
+                    type: object
+                  type:
+                    description: |-
+                      Type is the SecretType for the connection secret.
+                      - Only valid for Kubernetes Secret Stores.
+                    type: string
+                type: object
+              name:
+                description: Name is the name of the connection secret.
+                type: string
+            required:
+            - name
+            type: object
+          writeConnectionSecretToRef:
+            description: |-
+              WriteConnectionSecretToReference specifies the namespace and name of a
+              Secret to which any connection details for this managed resource should
+              be written. Connection details frequently include the endpoint, username,
+              and password required to connect to the managed resource.
+              This field is planned to be replaced in a future release in favor of
+              PublishConnectionDetailsTo. Currently, both could be set independently
+              and connection details would be published to both without affecting
+              each other.
+            properties:
+              name:
+                description: Name of the secret.
+                type: string
+              namespace:
+                description: Namespace of the secret.
+                type: string
+            required:
+            - name
+            - namespace
+            type: object
+        required:
+        - forProvider
+        type: object
+        x-kubernetes-validations:
+        - message: spec.forProvider.zone is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.zone)
+            || (has(self.initProvider) && has(self.initProvider.zone))'
+      status:
+        description: ZoneStatus defines the observed state of Zone.
+        properties:
+          atProvider:
+            properties:
+              createdAt:
+                description: |-
+                  (String) The DNS zone creation timestamp.
+                  The DNS zone creation timestamp.
+                type: string
+              deletionProtection:
+                description: |-
+                  (Boolean) Flag that protects the dns zone from accidental deletion.
+                  Flag that protects the dns zone from accidental deletion.
+                type: boolean
+              description:
+                description: |-
+                  (String) Description of the DNS zone.
+                  Description of the DNS zone.
+                type: string
+              folderId:
+                description: |-
+                  (String) ID of the folder to create a zone in. If it is not provided, the default provider folder is used.
+                  ID of the folder to create a zone in. If it is not provided, the default provider folder is used.
+                type: string
+              id:
+                description: (String) The ID of this resource.
+                type: string
+              labels:
+                additionalProperties:
+                  type: string
+                description: |-
+                  (Map of String) A set of key/value label pairs to assign to the DNS zone.
+                  A set of key/value label pairs to assign to the DNS zone.
+                type: object
+                x-kubernetes-map-type: granular
+              name:
+                description: |-
+                  (String) User assigned name of a specific resource. Must be unique within the folder.
+                  User assigned name of a specific resource. Must be unique within the folder.
+                type: string
+              privateNetworks:
+                description: |-
+                  (Set of String) For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.
+                  For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.
+                items:
+                  type: string
+                type: array
+                x-kubernetes-list-type: set
+              public:
+                description: |-
+                  (Boolean) The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.
+                  The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.
+                type: boolean
+              zone:
+                description: |-
+                  (String) The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+                  The DNS name of this zone, e.g. "example.com.". Must end with a dot.
+                type: string
+            type: object
+          conditions:
+            description: Conditions of the resource.
+            items:
+              description: A Condition that may apply to a resource.
+              properties:
+                lastTransitionTime:
+                  description: |-
+                    LastTransitionTime is the last time this condition transitioned from one
+                    status to another.
+                  format: date-time
+                  type: string
+                message:
+                  description: |-
+                    A Message containing details about this condition's last transition from
+                    one status to another, if any.
+                  type: string
+                observedGeneration:
+                  description: |-
+                    ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                    For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                    with respect to the current state of the instance.
+                  format: int64
+                  type: integer
+                reason:
+                  description: A Reason for this condition's last transition from
+                    one status to another.
+                  type: string
+                status:
+                  description: Status of this condition; is it currently True,
+                    False, or Unknown?
+                  type: string
+                type:
+                  description: |-
+                    Type of this condition. At most one of each condition type may apply to
+                    a resource at any point in time.
+                  type: string
+              required:
+              - lastTransitionTime
+              - reason
+              - status
+              - type
+              type: object
+            type: array
+            x-kubernetes-list-map-keys:
+            - type
+            x-kubernetes-list-type: map
+          observedGeneration:
+            description: |-
+              ObservedGeneration is the latest metadata.generation
+              which resulted in either a ready state, or stalled due to an error
+              it cannot recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/function.yandex-cloud.upjet.crossplane.io_iambindings.yaml b/package/crds/function.yandex-cloud.upjet.crossplane.io_iambindings.yaml new file mode 100644 index 0000000..6bb5edd --- /dev/null +++ b/package/crds/function.yandex-cloud.upjet.crossplane.io_iambindings.yaml @@ -0,0 +1,384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: iambindings.function.yandex-cloud.upjet.crossplane.io +spec: + group: function.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: IAMBinding + listKind: IAMBindingList + plural: iambindings + singular: iambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: IAMBinding is the Schema for the IAMBindings API. Allows management + of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IAMBindingSpec defines the desired state of IAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + functionId: + description: The Yandex Cloud Function ID to apply a binding to. + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+              InitProvider holds the same fields as ForProvider, with the exception
+              of Identifier and other resource reference fields. The fields that are
+              in InitProvider are merged into ForProvider when the resource is created.
+              The same fields are also added to the terraform ignore_changes hook, to
+              avoid updating them after creation. This is useful for fields that are
+              required on creation, but we do not desire to update them after creation,
+              for example because an external controller is managing them, like an
+              autoscaler.
+            properties:
+              functionId:
+                description: The Yandex Cloud Function ID to apply a binding to.
+                type: string
+              members:
+                description: 'Identities that will be granted the privilege in
+                  role. Each entry can have one of the following values:'
+                items:
+                  type: string
+                type: array
+                x-kubernetes-list-type: set
+              role:
+                description: The role that should be applied. See roles
+                type: string
+              sleepAfter:
+                type: number
+            type: object
+          managementPolicies:
+            default:
+            - '*'
+            description: |-
+              THIS IS A BETA FIELD. It is on by default but can be opted out
+              through a Crossplane feature flag.
+              ManagementPolicies specify the array of actions Crossplane is allowed to
+              take on the managed and external resources.
+              This field is planned to replace the DeletionPolicy field in a future
+              release. Currently, both could be set independently and non-default
+              values would be honored if the feature flag is enabled. If both are
+              custom, the DeletionPolicy field will be ignored.
+              See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+              and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+            items:
+              description: |-
+                A ManagementAction represents an action that the Crossplane controllers
+                can take on an external resource.
+              enum:
+              - Observe
+              - Create
+              - Update
+              - Delete
+              - LateInitialize
+              - '*'
+              type: string
+            type: array
+          providerConfigRef:
+            default:
+              name: default
+            description: |-
+              ProviderConfigReference specifies how the provider that will be used to
+              create, observe, update, and delete this managed resource should be
+              configured.
+            properties:
+              name:
+                description: Name of the referenced object.
+                type: string
+              policy:
+                description: Policies for referencing.
+                properties:
+                  resolution:
+                    default: Required
+                    description: |-
+                      Resolution specifies whether resolution of this reference is required.
+                      The default is 'Required', which means the reconcile will fail if the
+                      reference cannot be resolved. 'Optional' means this reference will be
+                      a no-op if it cannot be resolved.
+                    enum:
+                    - Required
+                    - Optional
+                    type: string
+                  resolve:
+                    description: |-
+                      Resolve specifies when this reference should be resolved. The default
+                      is 'IfNotPresent', which will attempt to resolve the reference only when
+                      the corresponding field is not present. Use 'Always' to resolve the
+                      reference on every reconcile.
+                    enum:
+                    - Always
+                    - IfNotPresent
+                    type: string
+                type: object
+            required:
+            - name
+            type: object
+          publishConnectionDetailsTo:
+            description: |-
+              PublishConnectionDetailsTo specifies the connection secret config which
+              contains a name, metadata and a reference to secret store config to
+              which any connection details for this managed resource should be written.
+              Connection details frequently include the endpoint, username,
+              and password required to connect to the managed resource.
+            properties:
+              configRef:
+                default:
+                  name: default
+                description: |-
+                  SecretStoreConfigRef specifies which secret store config should be used
+                  for this ConnectionSecret.
+                properties:
+                  name:
+                    description: Name of the referenced object.
+                    type: string
+                  policy:
+                    description: Policies for referencing.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              metadata:
+                description: Metadata is the metadata for connection secret.
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Annotations are the annotations to be added to connection secret.
+                      - For Kubernetes secrets, this will be used as "metadata.annotations".
+                      - It is up to the Secret Store implementation for other store types.
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Labels are the labels/tags to be added to connection secret.
+                      - For Kubernetes secrets, this will be used as "metadata.labels".
+                      - It is up to the Secret Store implementation for other store types.
+                    type: object
+                  type:
+                    description: |-
+                      Type is the SecretType for the connection secret.
+                      - Only valid for Kubernetes Secret Stores.
+                    type: string
+                type: object
+              name:
+                description: Name is the name of the connection secret.
+                type: string
+            required:
+            - name
+            type: object
+          writeConnectionSecretToRef:
+            description: |-
+              WriteConnectionSecretToReference specifies the namespace and name of a
+              Secret to which any connection details for this managed resource should
+              be written. Connection details frequently include the endpoint, username,
+              and password required to connect to the managed resource.
+              This field is planned to be replaced in a future release in favor of
+              PublishConnectionDetailsTo. Currently, both could be set independently
+              and connection details would be published to both without affecting
+              each other.
+            properties:
+              name:
+                description: Name of the secret.
+                type: string
+              namespace:
+                description: Namespace of the secret.
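+                    # Hypothetical usage sketch (names are assumptions, not defaults):
+                    #   writeConnectionSecretToRef:
+                    #     name: iambinding-conn
+                    #     namespace: crossplane-system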
+                type: string
+            required:
+            - name
+            - namespace
+            type: object
+        required:
+        - forProvider
+        type: object
+        x-kubernetes-validations:
+        - message: spec.forProvider.functionId is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.functionId)
+            || (has(self.initProvider) && has(self.initProvider.functionId))'
+        - message: spec.forProvider.members is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.members)
+            || (has(self.initProvider) && has(self.initProvider.members))'
+        - message: spec.forProvider.role is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.role)
+            || (has(self.initProvider) && has(self.initProvider.role))'
+      status:
+        description: IAMBindingStatus defines the observed state of IAMBinding.
+        properties:
+          atProvider:
+            properties:
+              functionId:
+                description: The Yandex Cloud Function ID to apply a binding to.
+                type: string
+              id:
+                type: string
+              members:
+                description: 'Identities that will be granted the privilege in
+                  role. Each entry can have one of the following values:'
+                items:
+                  type: string
+                type: array
+                x-kubernetes-list-type: set
+              role:
+                description: The role that should be applied. See roles
+                type: string
+              sleepAfter:
+                type: number
+            type: object
+          conditions:
+            description: Conditions of the resource.
+            items:
+              description: A Condition that may apply to a resource.
+              properties:
+                lastTransitionTime:
+                  description: |-
+                    LastTransitionTime is the last time this condition transitioned from one
+                    status to another.
+                  format: date-time
+                  type: string
+                message:
+                  description: |-
+                    A Message containing details about this condition's last transition from
+                    one status to another, if any.
+                  type: string
+                observedGeneration:
+                  description: |-
+                    ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                    For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                    with respect to the current state of the instance.
+                  format: int64
+                  type: integer
+                reason:
+                  description: A Reason for this condition's last transition from
+                    one status to another.
+                  type: string
+                status:
+                  description: Status of this condition; is it currently True,
+                    False, or Unknown?
+                  type: string
+                type:
+                  description: |-
+                    Type of this condition. At most one of each condition type may apply to
+                    a resource at any point in time.
+                  type: string
+              required:
+              - lastTransitionTime
+              - reason
+              - status
+              - type
+              type: object
+            type: array
+            x-kubernetes-list-map-keys:
+            - type
+            x-kubernetes-list-type: map
+          observedGeneration:
+            description: |-
+              ObservedGeneration is the latest metadata.generation
+              which resulted in either a ready state, or stalled due to an error
+              it cannot recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/function.yandex-cloud.upjet.crossplane.io_scalingpolicies.yaml b/package/crds/function.yandex-cloud.upjet.crossplane.io_scalingpolicies.yaml new file mode 100644 index 0000000..c810623 --- /dev/null +++ b/package/crds/function.yandex-cloud.upjet.crossplane.io_scalingpolicies.yaml @@ -0,0 +1,397 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: scalingpolicies.function.yandex-cloud.upjet.crossplane.io +spec: + group: function.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ScalingPolicy + listKind: ScalingPolicyList + plural: scalingpolicies + singular: scalingpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScalingPolicy is the Schema for the ScalingPolicys API. Allows + management of a Yandex Cloud Function Scaling Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScalingPolicySpec defines the desired state of ScalingPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+              See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+            enum:
+            - Orphan
+            - Delete
+            type: string
+          forProvider:
+            properties:
+              functionId:
+                description: Yandex Cloud Function ID used to define the function
+                type: string
+              policy:
+                description: List definition for Yandex Cloud Function scaling
+                  policies
+                items:
+                  properties:
+                    tag:
+                      description: Yandex.Cloud Function version tag for Yandex
+                        Cloud Function scaling policy
+                      type: string
+                    zoneInstancesLimit:
+                      description: Max number of instances in one zone for Yandex.Cloud
+                        Function with tag
+                      type: number
+                    zoneRequestsLimit:
+                      description: Max number of requests in one zone for Yandex.Cloud
+                        Function with tag
+                      type: number
+                  type: object
+                type: array
+            type: object
+          initProvider:
+            description: |-
+              THIS IS A BETA FIELD. It will be honored
+              unless the Management Policies feature flag is disabled.
+              InitProvider holds the same fields as ForProvider, with the exception
+              of Identifier and other resource reference fields. The fields that are
+              in InitProvider are merged into ForProvider when the resource is created.
+              The same fields are also added to the terraform ignore_changes hook, to
+              avoid updating them after creation. This is useful for fields that are
+              required on creation, but we do not desire to update them after creation,
+              for example because an external controller is managing them, like an
+              autoscaler.
+            properties:
+              functionId:
+                description: Yandex Cloud Function ID used to define the function
+                type: string
+              policy:
+                description: List definition for Yandex Cloud Function scaling
+                  policies
+                items:
+                  properties:
+                    tag:
+                      description: Yandex.Cloud Function version tag for Yandex
+                        Cloud Function scaling policy
+                      type: string
+                    zoneInstancesLimit:
+                      description: Max number of instances in one zone for Yandex.Cloud
+                        Function with tag
+                      type: number
+                    zoneRequestsLimit:
+                      description: Max number of requests in one zone for Yandex.Cloud
+                        Function with tag
+                      type: number
+                  type: object
+                type: array
+            type: object
+          managementPolicies:
+            default:
+            - '*'
+            description: |-
+              THIS IS A BETA FIELD. It is on by default but can be opted out
+              through a Crossplane feature flag.
+              ManagementPolicies specify the array of actions Crossplane is allowed to
+              take on the managed and external resources.
+              This field is planned to replace the DeletionPolicy field in a future
+              release. Currently, both could be set independently and non-default
+              values would be honored if the feature flag is enabled. If both are
+              custom, the DeletionPolicy field will be ignored.
+              See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+              and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+            items:
+              description: |-
+                A ManagementAction represents an action that the Crossplane controllers
+                can take on an external resource.
+              enum:
+              - Observe
+              - Create
+              - Update
+              - Delete
+              - LateInitialize
+              - '*'
+              type: string
+            type: array
+          providerConfigRef:
+            default:
+              name: default
+            description: |-
+              ProviderConfigReference specifies how the provider that will be used to
+              create, observe, update, and delete this managed resource should be
+              configured.
+            properties:
+              name:
+                description: Name of the referenced object.
+                type: string
+              policy:
+                description: Policies for referencing.
+                properties:
+                  resolution:
+                    default: Required
+                    description: |-
+                      Resolution specifies whether resolution of this reference is required.
+                      The default is 'Required', which means the reconcile will fail if the
+                      reference cannot be resolved. 'Optional' means this reference will be
+                      a no-op if it cannot be resolved.
+                    enum:
+                    - Required
+                    - Optional
+                    type: string
+                  resolve:
+                    description: |-
+                      Resolve specifies when this reference should be resolved. The default
+                      is 'IfNotPresent', which will attempt to resolve the reference only when
+                      the corresponding field is not present. Use 'Always' to resolve the
+                      reference on every reconcile.
+                    enum:
+                    - Always
+                    - IfNotPresent
+                    type: string
+                type: object
+            required:
+            - name
+            type: object
+          publishConnectionDetailsTo:
+            description: |-
+              PublishConnectionDetailsTo specifies the connection secret config which
+              contains a name, metadata and a reference to secret store config to
+              which any connection details for this managed resource should be written.
+              Connection details frequently include the endpoint, username,
+              and password required to connect to the managed resource.
+            properties:
+              configRef:
+                default:
+                  name: default
+                description: |-
+                  SecretStoreConfigRef specifies which secret store config should be used
+                  for this ConnectionSecret.
+                properties:
+                  name:
+                    description: Name of the referenced object.
+                    type: string
+                  policy:
+                    description: Policies for referencing.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              metadata:
+                description: Metadata is the metadata for connection secret.
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Annotations are the annotations to be added to connection secret.
+                      - For Kubernetes secrets, this will be used as "metadata.annotations".
+                      - It is up to the Secret Store implementation for other store types.
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Labels are the labels/tags to be added to connection secret.
+                      - For Kubernetes secrets, this will be used as "metadata.labels".
+                      - It is up to the Secret Store implementation for other store types.
+                    type: object
+                  type:
+                    description: |-
+                      Type is the SecretType for the connection secret.
+                      - Only valid for Kubernetes Secret Stores.
+                    type: string
+                type: object
+              name:
+                description: Name is the name of the connection secret.
+                type: string
+            required:
+            - name
+            type: object
+          writeConnectionSecretToRef:
+            description: |-
+              WriteConnectionSecretToReference specifies the namespace and name of a
+              Secret to which any connection details for this managed resource should
+              be written. Connection details frequently include the endpoint, username,
+              and password required to connect to the managed resource.
+              This field is planned to be replaced in a future release in favor of
+              PublishConnectionDetailsTo. Currently, both could be set independently
+              and connection details would be published to both without affecting
+              each other.
+            properties:
+              name:
+                description: Name of the secret.
+                type: string
+              namespace:
+                description: Namespace of the secret.
+                type: string
+            required:
+            - name
+            - namespace
+            type: object
+        required:
+        - forProvider
+        type: object
+        x-kubernetes-validations:
+        - message: spec.forProvider.functionId is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.functionId)
+            || (has(self.initProvider) && has(self.initProvider.functionId))'
+      status:
+        description: ScalingPolicyStatus defines the observed state of ScalingPolicy.
+        properties:
+          atProvider:
+            properties:
+              functionId:
+                description: Yandex Cloud Function ID used to define the function
+                type: string
+              id:
+                type: string
+              policy:
+                description: List definition for Yandex Cloud Function scaling
+                  policies
+                items:
+                  properties:
+                    tag:
+                      description: Yandex.Cloud Function version tag for Yandex
+                        Cloud Function scaling policy
+                      type: string
+                    zoneInstancesLimit:
+                      description: Max number of instances in one zone for Yandex.Cloud
+                        Function with tag
+                      type: number
+                    zoneRequestsLimit:
+                      description: Max number of requests in one zone for Yandex.Cloud
+                        Function with tag
+                      type: number
+                  type: object
+                type: array
+            type: object
+          conditions:
+            description: Conditions of the resource.
+            items:
+              description: A Condition that may apply to a resource.
+              properties:
+                lastTransitionTime:
+                  description: |-
+                    LastTransitionTime is the last time this condition transitioned from one
+                    status to another.
+                  format: date-time
+                  type: string
+                message:
+                  description: |-
+                    A Message containing details about this condition's last transition from
+                    one status to another, if any.
+                  type: string
+                observedGeneration:
+                  description: |-
+                    ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                    For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                    with respect to the current state of the instance.
+                  format: int64
+                  type: integer
+                reason:
+                  description: A Reason for this condition's last transition from
+                    one status to another.
+                  type: string
+                status:
+                  description: Status of this condition; is it currently True,
+                    False, or Unknown?
+                  type: string
+                type:
+                  description: |-
+                    Type of this condition. At most one of each condition type may apply to
+                    a resource at any point in time.
+                  type: string
+              required:
+              - lastTransitionTime
+              - reason
+              - status
+              - type
+              type: object
+            type: array
+            x-kubernetes-list-map-keys:
+            - type
+            x-kubernetes-list-type: map
+          observedGeneration:
+            description: |-
+              ObservedGeneration is the latest metadata.generation
+              which resulted in either a ready state, or stalled due to an error
+              it cannot recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/function.yandex-cloud.upjet.crossplane.io_triggers.yaml b/package/crds/function.yandex-cloud.upjet.crossplane.io_triggers.yaml new file mode 100644 index 0000000..6b7cbeb --- /dev/null +++ b/package/crds/function.yandex-cloud.upjet.crossplane.io_triggers.yaml @@ -0,0 +1,1506 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: triggers.function.yandex-cloud.upjet.crossplane.io +spec: + group: function.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Trigger + listKind: TriggerList + plural: triggers + singular: trigger + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Trigger is the Schema for the Triggers API. Allows management + of a Yandex Cloud Functions Trigger. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TriggerSpec defines the desired state of Trigger + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + container: + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + path: + description: Path for Yandex.Cloud Serverless Container + for Yandex Cloud Functions Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + containerRegistry: + description: Container Registry settings definition for Yandex + Cloud Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + createImage: + description: Boolean flag for setting create image event + for Yandex Cloud Functions Trigger + type: boolean + createImageTag: + description: Boolean flag for setting create image tag event + for Yandex Cloud Functions Trigger + type: boolean + deleteImage: + description: Boolean flag for setting delete image event + for Yandex Cloud Functions Trigger + type: boolean + deleteImageTag: + description: Boolean flag for setting delete image tag event + for Yandex Cloud Functions Trigger + type: boolean + imageName: + description: Image name filter setting for Yandex Cloud + Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + dataStreams: + description: Data Streams settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + database: + description: Stream database for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + streamName: + description: Stream name for Yandex Cloud Functions Trigger + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Functions Trigger + type: string + dlq: + description: Dead Letter Queue settings definition for Yandex + Cloud Functions Trigger + items: + properties: + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + folderId: + description: Folder ID for the Yandex Cloud Functions Trigger + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. 
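+                        # Illustrative sketch (the object name is hypothetical): instead
+                        # of setting folderId directly, a manifest can reference a managed
+                        # Folder by name:
+                        #   folderIdRef:
+                        #     name: example-folder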
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + function: + description: Yandex.Cloud Function settings definition for Yandex + Cloud Functions Trigger + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + iot: + description: IoT settings definition for Yandex Cloud Functions + Trigger, if present. Only one section iot or message_queue or + object_storage or timer can be defined. 
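+                # Illustrative sketch with placeholder values (assumptions, not defaults):
+                #   iot:
+                #   - registryId: <registry-id>
+                #     deviceId: <device-id>
+                #     batchCutoff: "1"
+                #     batchSize: "1"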
+ items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + deviceId: + description: IoT Device ID for Yandex Cloud Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + topic: + description: IoT Topic for Yandex Cloud Functions Trigger + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Functions Trigger + type: object + x-kubernetes-map-type: granular + logGroup: + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + logGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + logging: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + groupId: + description: Logging group ID for Yandex Cloud Functions + Trigger + type: string + levels: + description: Logging level filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceIds: + description: Resource ID filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTypes: + description: Resource type filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + streamNames: + description: Logging stream name filter setting for Yandex + Cloud Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + mail: + description: Mail settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + attachmentsBucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + messageQueue: + description: Message Queue settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + visibilityTimeout: + description: Visibility timeout for Yandex Cloud Functions + Trigger + type: string + type: object + type: array + name: + description: Yandex Cloud Functions Trigger name used to define + trigger + type: string + objectStorage: +
description: Object Storage settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + bucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + create: + description: Boolean flag for setting create event for Yandex + Cloud Functions Trigger + type: boolean + delete: + description: Boolean flag for setting delete event for Yandex + Cloud Functions Trigger + type: boolean + prefix: + description: Prefix for Object Storage for Yandex Cloud + Functions Trigger + type: string + suffix: + description: Suffix for Object Storage for Yandex Cloud + Functions Trigger + type: string + update: + description: Boolean flag for setting update event for Yandex + Cloud Functions Trigger + type: boolean + type: object + type: array + timer: + description: Timer settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + cronExpression: + description: Cron expression for timer for Yandex Cloud + Functions Trigger + type: string + payload: + description: Payload to be passed to function + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + container: + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + path: + description: Path for Yandex.Cloud Serverless Container + for Yandex Cloud Functions Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + containerRegistry: + description: Container Registry settings definition for Yandex + Cloud Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + createImage: + description: Boolean flag for setting create image event + for Yandex Cloud Functions Trigger + type: boolean + createImageTag: + description: Boolean flag for setting create image tag event + for Yandex Cloud Functions Trigger + type: boolean + deleteImage: + description: Boolean flag for setting delete image event + for Yandex Cloud Functions Trigger + type: boolean + deleteImageTag: + description: Boolean flag for setting delete image tag event + for Yandex Cloud Functions Trigger + type: boolean + imageName: + description: Image name filter setting for Yandex Cloud + Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + dataStreams: + description: Data Streams settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + database: + description: Stream database for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + streamName: + description: Stream name for Yandex Cloud Functions Trigger + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Functions Trigger + type: string + dlq: + description: Dead Letter Queue settings definition for Yandex + Cloud Functions Trigger + items: + properties: + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + folderId: + description: Folder ID for the Yandex Cloud Functions Trigger + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + function: + description: Yandex.Cloud Function settings definition for Yandex + Cloud Functions Trigger + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + iot: + description: IoT settings definition for Yandex Cloud Functions + Trigger, if present. Only one section iot or message_queue or + object_storage or timer can be defined. 
+ items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + deviceId: + description: IoT Device ID for Yandex Cloud Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + topic: + description: IoT Topic for Yandex Cloud Functions Trigger + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Functions Trigger + type: object + x-kubernetes-map-type: granular + logGroup: + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + logGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + logging: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + groupId: + description: Logging group ID for Yandex Cloud Functions + Trigger + type: string + levels: + description: Logging level filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceIds: + description: Resource ID filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTypes: + description: Resource type filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + streamNames: + description: Logging stream name filter setting for Yandex + Cloud Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + mail: + description: Mail settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + attachmentsBucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + messageQueue: + description: Message Queue settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + visibilityTimeout: + description: Visibility timeout for Yandex Cloud Functions + Trigger + type: string + type: object + type: array + name: + description: Yandex Cloud Functions Trigger name used to define + trigger + type: string + objectStorage: +
description: Object Storage settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + bucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + create: + description: Boolean flag for setting create event for Yandex + Cloud Functions Trigger + type: boolean + delete: + description: Boolean flag for setting delete event for Yandex + Cloud Functions Trigger + type: boolean + prefix: + description: Prefix for Object Storage for Yandex Cloud + Functions Trigger + type: string + suffix: + description: Suffix for Object Storage for Yandex Cloud + Functions Trigger + type: string + update: + description: Boolean flag for setting update event for Yandex + Cloud Functions Trigger + type: boolean + type: object + type: array + timer: + description: Timer settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + cronExpression: + description: Cron expression for timer for Yandex Cloud + Functions Trigger + type: string + payload: + description: Payload to be passed to function + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TriggerStatus defines the observed state of Trigger. + properties: + atProvider: + properties: + container: + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + path: + description: Path for Yandex.Cloud Serverless Container + for Yandex Cloud Functions Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + containerRegistry: + description: Container Registry settings definition for Yandex + Cloud Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + createImage: + description: Boolean flag for setting create image event + for Yandex Cloud Functions Trigger + type: boolean + createImageTag: + description: Boolean flag for setting create image tag event + for Yandex Cloud Functions Trigger + type: boolean + deleteImage: + description: Boolean flag for setting delete image event + for Yandex Cloud Functions Trigger + type: boolean + deleteImageTag: + description: Boolean flag for setting delete image tag event + for Yandex Cloud Functions Trigger + type: boolean + imageName: + description: Image name filter setting for Yandex Cloud + Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the Yandex Cloud Functions + Trigger + type: string + dataStreams: + description: Data Streams settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + database: + description: Stream database for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + streamName: + description: Stream name for Yandex Cloud Functions Trigger + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Functions Trigger + type: string + dlq: + description: Dead Letter Queue settings definition for Yandex + Cloud Functions Trigger + items: + properties: + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for 
Yandex + Cloud Functions Trigger + type: string + type: object + type: array + folderId: + description: Folder ID for the Yandex Cloud Functions Trigger + type: string + function: + description: Yandex.Cloud Function settings definition for Yandex + Cloud Functions Trigger + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + iot: + description: IoT settings definition for Yandex Cloud Functions + Trigger, if present. Only one section iot or message_queue or + object_storage or timer can be defined. + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + deviceId: + description: IoT Device ID for Yandex Cloud Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + topic: + description: IoT Topic for Yandex Cloud Functions Trigger + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Functions Trigger + type: object + x-kubernetes-map-type: granular + logGroup: + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + logGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + logging: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + groupId: + description: Logging group ID for Yandex Cloud Functions + Trigger + type: string + levels: + description: Logging level filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceIds: + description: Resource ID filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTypes: + description: Resource type filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + streamNames: + description: Logging stream name filter setting for Yandex + Cloud Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + mail: + description: Mail settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + attachmentsBucketId: + description: Object Storage Bucket ID for Yandex Cloud
Functions + Trigger + type: string + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + messageQueue: + description: Message Queue settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + visibilityTimeout: + description: Visibility timeout for Yandex Cloud Functions + Trigger + type: string + type: object + type: array + name: + description: Yandex Cloud Functions Trigger name used to define + trigger + type: string + objectStorage: + description: Object Storage settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + bucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + create: + description: Boolean flag for setting create event for Yandex + Cloud Functions Trigger + type: boolean + delete: + description: Boolean flag for setting delete event for Yandex + Cloud Functions Trigger + type: boolean + prefix: + description: Prefix for Object Storage for Yandex Cloud + Functions Trigger + type: string + suffix: + description: Suffix for Object Storage for Yandex Cloud + Functions Trigger + type: string + update: + description: Boolean flag for setting update event for Yandex + Cloud Functions Trigger + type: boolean + type: object + type: array + timer: + description: Timer settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + cronExpression: + description: Cron expression for timer for Yandex Cloud + Functions Trigger + type: string + payload: + description: Payload to be passed to function + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountapikeys.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountapikeys.yaml new file mode 100644 index 0000000..da49d80 --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountapikeys.yaml @@ -0,0 +1,596 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccountapikeys.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccountAPIKey + listKind: ServiceAccountAPIKeyList + plural: serviceaccountapikeys + singular: serviceaccountapikey + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccountAPIKey is the Schema for the ServiceAccountAPIKeys + API. Allows management of a Yandex.Cloud IAM service account API key. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountAPIKeySpec defines the desired state of ServiceAccountAPIKey + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the key. + type: string + expiresAt: + description: The key will no longer be valid after the expiration + timestamp. + type: string + outputToLockbox: + description: Option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForSecretKey: + description: |- + Entry where to store the value of secret_key. + entry that will store the value of secret_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensitive values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + pgpKey: + description: An optional PGP key to encrypt the resulting secret + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + scope: + description: The scope of the key. + type: string + serviceAccountId: + description: ID of the service account to create an API key for. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD.
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the key. + type: string + expiresAt: + description: The key will no longer be valid after the expiration + timestamp. + type: string + outputToLockbox: + description: Option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForSecretKey: + description: |- + Entry where to store the value of secret_key. + entry that will store the value of secret_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensitive values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + pgpKey: + description: An optional PGP key to encrypt the resulting secret + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + scope: + description: The scope of the key. + type: string + serviceAccountId: + description: ID of the service account to create an API key for. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServiceAccountAPIKeyStatus defines the observed state of + ServiceAccountAPIKey. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the API key. + type: string + description: + description: The description of the key. + type: string + encryptedSecretKey: + description: The encrypted secret key, base64 encoded. This is + only populated when pgp_key is supplied. + type: string + expiresAt: + description: The key will no longer be valid after the expiration + timestamp. + type: string + id: + type: string + keyFingerprint: + description: The fingerprint of the PGP key used to encrypt the + secret key. This is only populated when pgp_key is supplied. + type: string + outputToLockbox: + description: Option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForSecretKey: + description: |- + Entry where to store the value of secret_key. + entry that will store the value of secret_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensitive values.
+ secret where to add the version with the sensitive values + type: string + type: object + type: array + outputToLockboxVersionId: + description: |- + ID of the Lockbox secret version that contains the value of secret_key. This is only populated when output_to_lockbox is supplied. This version will be destroyed when the IAM key is destroyed, or when output_to_lockbox is removed. + version generated, that will contain the sensitive outputs + type: string + pgpKey: + description: An optional PGP key to encrypt the resulting secret + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + scope: + description: The scope of the key. + type: string + serviceAccountId: + description: ID of the service account to create an API key for. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiambindings.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiambindings.yaml new file mode 100644 index 0000000..b2290bc --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiambindings.yaml @@ -0,0 +1,686 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccountiambindings.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccountIAMBinding + listKind: ServiceAccountIAMBindingList + plural: serviceaccountiambindings + singular: serviceaccountiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccountIAMBinding is the Schema for the ServiceAccountIAMBindings + API. Allows management of a single IAM binding for a Yandex IAM service + account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountIAMBindingSpec defines the desired state of + ServiceAccountIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. Only one yandex_iam_service_account_iam_binding + can be used per role. 
+ type: string + serviceAccountId: + description: The service account ID to apply a binding to. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountRef: + description: References to ServiceAccount to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount to populate + members. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. Only one yandex_iam_service_account_iam_binding + can be used per role. + type: string + serviceAccountId: + description: The service account ID to apply a binding to. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountRef: + description: References to ServiceAccount to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ServiceAccountIAMBindingStatus defines the observed state + of ServiceAccountIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. Only one yandex_iam_service_account_iam_binding + can be used per role. + type: string + serviceAccountId: + description: The service account ID to apply a binding to. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiammembers.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiammembers.yaml new file mode 100644 index 0000000..6850491 --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiammembers.yaml @@ -0,0 +1,669 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccountiammembers.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccountIAMMember + listKind: ServiceAccountIAMMemberList + plural: serviceaccountiammembers + singular: serviceaccountiammember + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccountIAMMember is the Schema for the ServiceAccountIAMMembers + API. Allows management of a single member for a single IAM binding for a + Yandex IAM service account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountIAMMemberSpec defines the desired state of + ServiceAccountIAMMember + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + member: + description: 'Identity that will be granted the privilege in role. + Entry can have one of the following values:' + type: string + role: + description: The role that should be applied. Only one yandex_iam_service_account_iam_binding + can be used per role. + type: string + serviceAccountId: + description: The service account ID to apply a policy to. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountRef: + description: Reference to a ServiceAccount to populate member. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountSelector: + description: Selector for a ServiceAccount to populate member. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + member: + description: 'Identity that will be granted the privilege in role. + Entry can have one of the following values:' + type: string + role: + description: The role that should be applied. Only one yandex_iam_service_account_iam_binding + can be used per role. + type: string + serviceAccountId: + description: The service account ID to apply a policy to. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountRef: + description: Reference to a ServiceAccount to populate member. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountSelector: + description: Selector for a ServiceAccount to populate member. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ServiceAccountIAMMemberStatus defines the observed state + of ServiceAccountIAMMember. + properties: + atProvider: + properties: + id: + type: string + member: + description: 'Identity that will be granted the privilege in role. + Entry can have one of the following values:' + type: string + role: + description: The role that should be applied. Only one yandex_iam_service_account_iam_binding + can be used per role. + type: string + serviceAccountId: + description: The service account ID to apply a policy to. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiampolicies.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiampolicies.yaml new file mode 100644 index 0000000..d4ff240 --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountiampolicies.yaml @@ -0,0 +1,502 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccountiampolicies.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccountIAMPolicy + listKind: ServiceAccountIAMPolicyList + plural: serviceaccountiampolicies + singular: serviceaccountiampolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccountIAMPolicy is the Schema for the ServiceAccountIAMPolicys + API. Allows management of the IAM policy for a Yandex IAM service account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountIAMPolicySpec defines the desired state of + ServiceAccountIAMPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + policyData: + description: The policy data generated by a yandex_iam_policy + data source. + type: string + serviceAccountId: + description: The service account ID to apply a policy to. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + policyData: + description: The policy data generated by a yandex_iam_policy + data source. + type: string + serviceAccountId: + description: The service account ID to apply a policy to. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.policyData is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyData) + || (has(self.initProvider) && has(self.initProvider.policyData))' + status: + description: ServiceAccountIAMPolicyStatus defines the observed state + of ServiceAccountIAMPolicy. + properties: + atProvider: + properties: + id: + type: string + policyData: + description: The policy data generated by a yandex_iam_policy + data source. + type: string + serviceAccountId: + description: The service account ID to apply a policy to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountkeys.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountkeys.yaml new file mode 100644 index 0000000..2ae76ff --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountkeys.yaml @@ -0,0 +1,604 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccountkeys.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccountKey + listKind: ServiceAccountKeyList + plural: serviceaccountkeys + singular: serviceaccountkey + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccountKey is the Schema for the ServiceAccountKeys API. + Allows management of a Yandex.Cloud IAM service account key. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountKeySpec defines the desired state of ServiceAccountKey + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the key pair. 
+ type: string + format: + description: The output format of the keys. PEM_FILE is the default + format. + type: string + keyAlgorithm: + description: The algorithm used to generate the key. RSA_2048 + is the default algorithm. Valid values are listed in the API + reference. + type: string + outputToLockbox: + description: option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForPrivateKey: + description: |- + Entry where to store the value of private_key. + entry that will store the value of private_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensible values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + pgpKey: + description: An optional PGP key to encrypt the resulting private + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + serviceAccountId: + description: ID of the service account to create a pair for. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the key pair. + type: string + format: + description: The output format of the keys. PEM_FILE is the default + format. + type: string + keyAlgorithm: + description: The algorithm used to generate the key. RSA_2048 + is the default algorithm. Valid values are listed in the API + reference. + type: string + outputToLockbox: + description: option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForPrivateKey: + description: |- + Entry where to store the value of private_key. + entry that will store the value of private_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensible values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + pgpKey: + description: An optional PGP key to encrypt the resulting private + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + serviceAccountId: + description: ID of the service account to create a pair for. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServiceAccountKeyStatus defines the observed state of ServiceAccountKey. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the static access key. + type: string + description: + description: The description of the key pair. + type: string + encryptedPrivateKey: + description: The encrypted private key, base64 encoded. This is + only populated when pgp_key is supplied. + type: string + format: + description: The output format of the keys. PEM_FILE is the default + format. + type: string + id: + type: string + keyAlgorithm: + description: The algorithm used to generate the key. RSA_2048 + is the default algorithm. Valid values are listed in the API + reference. + type: string + keyFingerprint: + description: The fingerprint of the PGP key used to encrypt the + private key. This is only populated when pgp_key is supplied. + type: string + outputToLockbox: + description: option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForPrivateKey: + description: |- + Entry where to store the value of private_key. + entry that will store the value of private_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensible values. 
+ secret where to add the version with the sensitive values + type: string + type: object + type: array + outputToLockboxVersionId: + description: |- + ID of the Lockbox secret version that contains the value of private_key. This is only populated when output_to_lockbox is supplied. This version will be destroyed when the IAM key is destroyed, or when output_to_lockbox is removed. + version generated, that will contain the sensitive outputs + type: string + pgpKey: + description: An optional PGP key to encrypt the resulting private + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + publicKey: + description: The public key. + type: string + serviceAccountId: + description: ID of the service account to create a pair for. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccounts.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccounts.yaml new file mode 100644 index 0000000..857833a --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccounts.yaml @@ -0,0 +1,501 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccounts.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccount + listKind: ServiceAccountList + plural: serviceaccounts + singular: serviceaccount + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccount is the Schema for the ServiceAccounts API. Allows + management of a Yandex.Cloud IAM service account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountSpec defines the desired state of ServiceAccount + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the service account. + type: string + folderId: + description: ID of the folder that the service account will be + created in. Defaults to the provider folder configuration. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
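For reference, a minimal manifest conforming to the serviceaccountkeys schema above might look like the sketch below. All names here are hypothetical; serviceAccountIdRef resolves serviceAccountId from a ServiceAccount object managed in the same cluster, and writeConnectionSecretToRef names the Secret to which the provider writes the generated key material. The resource is cluster-scoped, so metadata carries no namespace.

# illustrative sketch only; all names are hypothetical
apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: ServiceAccountKey
metadata:
  name: example-sa-key
spec:
  forProvider:
    description: authorized key for example-sa
    serviceAccountIdRef:
      name: example-sa
  writeConnectionSecretToRef:
    name: example-sa-key-conn
    namespace: crossplane-system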
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the service account. + type: string + folderId: + description: ID of the folder that the service account will be + created in. Defaults to the provider folder configuration. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServiceAccountStatus defines the observed state of ServiceAccount. + properties: + atProvider: + properties: + createdAt: + type: string + description: + description: Description of the service account. + type: string + folderId: + description: ID of the folder that the service account will be + created in. Defaults to the provider folder configuration. + type: string + id: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountstaticaccesskeys.yaml b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountstaticaccesskeys.yaml new file mode 100644 index 0000000..4e57e35 --- /dev/null +++ b/package/crds/iam.yandex-cloud.upjet.crossplane.io_serviceaccountstaticaccesskeys.yaml @@ -0,0 +1,599 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: serviceaccountstaticaccesskeys.iam.yandex-cloud.upjet.crossplane.io +spec: + group: iam.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ServiceAccountStaticAccessKey + listKind: ServiceAccountStaticAccessKeyList + plural: serviceaccountstaticaccesskeys + singular: serviceaccountstaticaccesskey + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServiceAccountStaticAccessKey is the Schema for the ServiceAccountStaticAccessKeys + API. Allows management of a Yandex.Cloud IAM service account static access + key. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceAccountStaticAccessKeySpec defines the desired state + of ServiceAccountStaticAccessKey + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the service account static key. + type: string + outputToLockbox: + description: option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForAccessKey: + description: |- + Entry where to store the value of access_key. 
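For reference, a minimal ServiceAccount manifest conforming to the schema above might look like this (the name is hypothetical). Per the schema, folderId may be omitted, in which case it defaults to the provider folder configuration.

# illustrative sketch only; the name is hypothetical
apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: ServiceAccount
metadata:
  name: example-sa
spec:
  forProvider:
    description: service account managed by Crossplane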
+ entry that will store the value of access_key + type: string + entryForSecretKey: + description: |- + Entry where to store the value of secret_key. + entry that will store the value of secret_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensible values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + pgpKey: + description: An optional PGP key to encrypt the resulting secret + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + serviceAccountId: + description: ID of the service account which is used to get a + static key. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the service account static key. + type: string + outputToLockbox: + description: option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForAccessKey: + description: |- + Entry where to store the value of access_key. + entry that will store the value of access_key + type: string + entryForSecretKey: + description: |- + Entry where to store the value of secret_key. + entry that will store the value of secret_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensible values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + pgpKey: + description: An optional PGP key to encrypt the resulting secret + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + serviceAccountId: + description: ID of the service account which is used to get a + static key. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServiceAccountStaticAccessKeyStatus defines the observed + state of ServiceAccountStaticAccessKey. + properties: + atProvider: + properties: + accessKey: + description: ID of the static access key. This is only populated + when output_to_lockbox is not provided. + type: string + createdAt: + description: Creation timestamp of the static access key. + type: string + description: + description: The description of the service account static key. + type: string + encryptedSecretKey: + description: The encrypted secret, base64 encoded. This is only + populated when pgp_key is supplied. + type: string + id: + type: string + keyFingerprint: + description: The fingerprint of the PGP key used to encrypt the + secret key. This is only populated when pgp_key is supplied. + type: string + outputToLockbox: + description: option to create a Lockbox secret version from sensitive + outputs + items: + properties: + entryForAccessKey: + description: |- + Entry where to store the value of access_key. + entry that will store the value of access_key + type: string + entryForSecretKey: + description: |- + Entry where to store the value of secret_key. + entry that will store the value of secret_key + type: string + secretId: + description: |- + ID of the Lockbox secret where to store the sensible values. + secret where to add the version with the sensitive values + type: string + type: object + type: array + outputToLockboxVersionId: + description: |- + ID of the Lockbox secret version that contains the values of access_key and secret_key. This is only populated when output_to_lockbox is supplied. 
This version will be destroyed when the IAM key is destroyed, or when output_to_lockbox is removed. + version generated, that will contain the sensitive outputs + type: string + pgpKey: + description: An optional PGP key to encrypt the resulting secret + key material. May either be a base64-encoded public key or a + keybase username in the form keybase:keybaseusername. + type: string + serviceAccountId: + description: ID of the service account which is used to get a + static key. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iot.yandex-cloud.upjet.crossplane.io_corebrokers.yaml b/package/crds/iot.yandex-cloud.upjet.crossplane.io_corebrokers.yaml new file mode 100644 index 0000000..ce14c5c --- /dev/null +++ b/package/crds/iot.yandex-cloud.upjet.crossplane.io_corebrokers.yaml @@ -0,0 +1,612 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: corebrokers.iot.yandex-cloud.upjet.crossplane.io +spec: + group: iot.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CoreBroker + listKind: CoreBrokerList + plural: corebrokers + singular: corebroker + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CoreBroker is the Schema for the CoreBrokers API. Allows management + of a Yandex.Cloud IoT Core Broker. 
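For reference, a ServiceAccountStaticAccessKey sketch exercising the outputToLockbox block defined above (all names and IDs are hypothetical). Per the schema, when output_to_lockbox is supplied the accessKey status field is not populated; the sensitive values are written to a new version of the referenced Lockbox secret, whose ID surfaces in outputToLockboxVersionId.

# illustrative sketch only; names and IDs are hypothetical
apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: ServiceAccountStaticAccessKey
metadata:
  name: example-sa-static-key
spec:
  forProvider:
    description: static access key for object storage
    serviceAccountIdRef:
      name: example-sa
    outputToLockbox:
      - secretId: e6qexample          # hypothetical Lockbox secret ID
        entryForAccessKey: access-key # Lockbox entry that will hold access_key
        entryForSecretKey: secret-key # Lockbox entry that will hold secret_key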
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CoreBrokerSpec defines the desired state of CoreBroker + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Broker + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Broker + type: string + folderId: + description: Folder ID for the IoT Core Broker + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Broker. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Broker + items: + properties: + disabled: + description: Is logging for broker disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Broker name used to define broker + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Broker + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Broker + type: string + folderId: + description: Folder ID for the IoT Core Broker + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Broker. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Broker + items: + properties: + disabled: + description: Is logging for broker disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Broker name used to define broker + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CoreBrokerStatus defines the observed state of CoreBroker. + properties: + atProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Broker + items: + type: string + type: array + x-kubernetes-list-type: set + createdAt: + description: Creation timestamp of the IoT Core Broker + type: string + description: + description: Description of the IoT Core Broker + type: string + folderId: + description: Folder ID for the IoT Core Broker + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Broker. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Broker + items: + properties: + disabled: + description: Is logging for broker disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Broker name used to define broker + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
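Apart from the closing lines that follow, this completes the corebrokers schema. To make the shape concrete, here is a hypothetical CoreBroker manifest consistent with it; the object names, folder reference, secret coordinates, and log level are illustrative assumptions, not values taken from this diff:

```yaml
apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CoreBroker
metadata:
  name: example-broker
spec:
  forProvider:
    name: example-broker        # required at create/update time by the CEL rule above
    description: Example IoT Core Broker
    folderIdRef:
      name: example-folder      # placeholder Folder object; resolution fills in folderId
    labels:
      env: dev
    logOptions:
      - minLevel: INFO          # assumed level name; the schema constrains this only to a string
  providerConfigRef:
    name: default
  writeConnectionSecretToRef:   # optional, shaped as defined above
    name: example-broker-conn
    namespace: crossplane-system
```

The CEL rule quoted above only enforces forProvider.name while the management policies include Create, Update, or '*'; a consequence for observe-only objects is sketched further down.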
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iot.yandex-cloud.upjet.crossplane.io_coredevices.yaml b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coredevices.yaml new file mode 100644 index 0000000..4646788 --- /dev/null +++ b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coredevices.yaml @@ -0,0 +1,432 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: coredevices.iot.yandex-cloud.upjet.crossplane.io +spec: + group: iot.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CoreDevice + listKind: CoreDeviceList + plural: coredevices + singular: coredevice + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CoreDevice is the Schema for the CoreDevices API. Allows management + of a Yandex.Cloud IoT Core Device. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CoreDeviceSpec defines the desired state of CoreDevice + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aliases: + additionalProperties: + type: string + description: A set of key/value aliases pairs to assign to the + IoT Core Device + type: object + x-kubernetes-map-type: granular + certificates: + description: A set of certificate's fingerprints for the IoT Core + Device + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Device + type: string + name: + description: IoT Core Device name used to define device + type: string + passwordsSecretRef: + description: A set of passwords's id for the IoT Core Device + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + registryId: + description: IoT Core Registry ID for the IoT Core Device + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aliases: + additionalProperties: + type: string + description: A set of key/value aliases pairs to assign to the + IoT Core Device + type: object + x-kubernetes-map-type: granular + certificates: + description: A set of certificate's fingerprints for the IoT Core + Device + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Device + type: string + name: + description: IoT Core Device name used to define device + type: string + passwordsSecretRef: + items: + type: string + type: array + registryId: + description: IoT Core Registry ID for the IoT Core Device + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
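To connect the ManagementAction enum above with the CEL validations that close this spec: when the policy list contains neither Create nor Update nor '*', the rules no longer require forProvider.name or forProvider.registryId, which is what makes observe-only imports possible. A sketch under that assumption, with a placeholder external name:

```yaml
apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CoreDevice
metadata:
  name: imported-device
  annotations:
    crossplane.io/external-name: example-device-id  # placeholder: ID of a pre-existing device
spec:
  managementPolicies: ["Observe"]  # no Create/Update, so both CEL rules are satisfied vacuously
  forProvider: {}                  # name and registryId may therefore be omitted
```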
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.registryId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.registryId) + || (has(self.initProvider) && has(self.initProvider.registryId))' + status: + description: CoreDeviceStatus defines the observed state of CoreDevice. + properties: + atProvider: + properties: + aliases: + additionalProperties: + type: string + description: A set of key/value aliases pairs to assign to the + IoT Core Device + type: object + x-kubernetes-map-type: granular + certificates: + description: A set of certificate's fingerprints for the IoT Core + Device + items: + type: string + type: array + x-kubernetes-list-type: set + createdAt: + description: Creation timestamp of the IoT Core Device + type: string + description: + description: Description of the IoT Core Device + type: string + id: + type: string + name: + description: IoT Core Device name used to define device + type: string + registryId: + description: IoT Core Registry ID for the IoT Core Device + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iot.yandex-cloud.upjet.crossplane.io_coreregistries.yaml b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coreregistries.yaml new file mode 100644 index 0000000..78cad3c --- /dev/null +++ b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coreregistries.yaml @@ -0,0 +1,637 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: coreregistries.iot.yandex-cloud.upjet.crossplane.io +spec: + group: iot.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CoreRegistry + listKind: CoreRegistryList + plural: coreregistries + singular: coreregistry + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CoreRegistry is the Schema for the CoreRegistrys API. Allows + management of a Yandex.Cloud IoT Core Registry. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CoreRegistrySpec defines the desired state of CoreRegistry + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
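Stepping back to the coredevices schema that closed just above: a hypothetical manifest satisfying both of its CEL rules could look like the following. The registry ID, alias mapping, and secret coordinates are placeholders; note that the generated schema offers no registryIdRef or registryIdSelector, so registryId must be set directly:

```yaml
apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CoreDevice
metadata:
  name: example-device
spec:
  forProvider:
    name: example-device             # first CEL rule
    registryId: example-registry-id  # second CEL rule; plain string, no reference helper generated
    description: Example IoT Core Device
    aliases:
      events: '$devices/example-device-id/events'  # placeholder alias-to-topic mapping
    passwordsSecretRef:              # SecretKeySelector entries, per the schema above
      - key: password
        name: device-passwords
        namespace: crossplane-system
  providerConfigRef:
    name: default
```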
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Registry + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Registry + type: string + folderId: + description: Folder ID for the IoT Core Registry + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Registry. 
+ type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Registry + items: + properties: + disabled: + description: Is logging for registry disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Device name used to define registry + type: string + passwordsSecretRef: + description: A set of passwords's id for the IoT Core Registry + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Registry + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Registry + type: string + folderId: + description: Folder ID for the IoT Core Registry + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Registry. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Registry + items: + properties: + disabled: + description: Is logging for registry disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Device name used to define registry + type: string + passwordsSecretRef: + items: + type: string + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CoreRegistryStatus defines the observed state of CoreRegistry. + properties: + atProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Registry + items: + type: string + type: array + x-kubernetes-list-type: set + createdAt: + description: Creation timestamp of the IoT Core Registry + type: string + description: + description: Description of the IoT Core Registry + type: string + folderId: + description: Folder ID for the IoT Core Registry + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Registry. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Registry + items: + properties: + disabled: + description: Is logging for registry disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Device name used to define registry + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
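The coreregistries schema largely mirrors corebrokers, adding passwordsSecretRef. A hypothetical manifest, this time selecting the target Folder by label instead of by name (the label, secret coordinates, and names are again placeholders):

```yaml
apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CoreRegistry
metadata:
  name: example-registry
spec:
  forProvider:
    name: example-registry
    folderIdSelector:
      matchLabels:
        team: iot                 # selects a Folder object carrying this label
    logOptions:
      - disabled: true            # schema-valid way to switch registry logging off
    passwordsSecretRef:
      - key: password
        name: registry-passwords
        namespace: crossplane-system
  providerConfigRef:
    name: default
```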
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricencryptionkeyiambindings.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricencryptionkeyiambindings.yaml new file mode 100644 index 0000000..f34d0f9 --- /dev/null +++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricencryptionkeyiambindings.yaml @@ -0,0 +1,689 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: asymmetricencryptionkeyiambindings.kms.yandex-cloud.upjet.crossplane.io +spec: + group: kms.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: AsymmetricEncryptionKeyIAMBinding + listKind: AsymmetricEncryptionKeyIAMBindingList + plural: asymmetricencryptionkeyiambindings + singular: asymmetricencryptionkeyiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: AsymmetricEncryptionKeyIAMBinding is the Schema for the AsymmetricEncryptionKeyIAMBindings + API. Allows management of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AsymmetricEncryptionKeyIAMBindingSpec defines the desired + state of AsymmetricEncryptionKeyIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + asymmetricEncryptionKeyId: + description: The Yandex Key Management Service Asymmetric Encryption + Key ID to apply a binding to. + type: string + asymmetricEncryptionKeyIdRef: + description: Reference to a AsymmetricEncryptionKey to populate + asymmetricEncryptionKeyId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + asymmetricEncryptionKeyIdSelector: + description: Selector for a AsymmetricEncryptionKey to populate + asymmetricEncryptionKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + asymmetricEncryptionKeyId: + description: The Yandex Key Management Service Asymmetric Encryption + Key ID to apply a binding to. + type: string + asymmetricEncryptionKeyIdRef: + description: Reference to a AsymmetricEncryptionKey to populate + asymmetricEncryptionKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + asymmetricEncryptionKeyIdSelector: + description: Selector for a AsymmetricEncryptionKey to populate + asymmetricEncryptionKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ metadata:
+ description: Metadata is the metadata for connection secret.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations are the annotations to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.annotations".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Labels are the labels/tags to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.labels".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ type:
+ description: |-
+ Type is the SecretType for the connection secret.
+ - Only valid for Kubernetes Secret Stores.
+ type: string
+ type: object
+ name:
+ description: Name is the name of the connection secret.
+ type: string
+ required:
+ - name
+ type: object
+ writeConnectionSecretToRef:
+ description: |-
+ WriteConnectionSecretToReference specifies the namespace and name of a
+ Secret to which any connection details for this managed resource should
+ be written. Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ This field is planned to be replaced in a future release in favor of
+ PublishConnectionDetailsTo. Currently, both could be set independently
+ and connection details would be published to both without affecting
+ each other.
+ properties:
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ required:
+ - forProvider
+ type: object
+ x-kubernetes-validations:
+ - message: spec.forProvider.role is a required parameter
+ rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+ || ''Update'' in self.managementPolicies) || has(self.forProvider.role)
+ || (has(self.initProvider) && has(self.initProvider.role))'
+ status:
+ description: AsymmetricEncryptionKeyIAMBindingStatus defines the observed
+ state of AsymmetricEncryptionKeyIAMBinding.
+ properties:
+ atProvider:
+ properties:
+ asymmetricEncryptionKeyId:
+ description: The Yandex Key Management Service Asymmetric Encryption
+ Key ID to apply a binding to.
+ type: string
+ id:
+ type: string
+ members:
+ description: 'Identities that will be granted the privilege in
+ role. Each entry can have one of the following values:'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ role:
+ description: The role that should be applied. See roles.
+ type: string
+ sleepAfter:
+ type: number
+ type: object
+ conditions:
+ description: Conditions of the resource.
+ items:
+ description: A Condition that may apply to a resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ LastTransitionTime is the last time this condition transitioned from one
+ status to another.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A Message containing details about this condition's last transition from
+ one status to another, if any.
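# A minimal sketch of a manifest for the AsymmetricEncryptionKeyIAMBinding kind
# defined above. It satisfies the x-kubernetes-validations CEL rule (role is
# required while 'Create', 'Update', or '*' management policies apply); the key
# ID, role, and member values below are hypothetical placeholders, not values
# taken from this schema.
apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: AsymmetricEncryptionKeyIAMBinding
metadata:
  name: example-binding                        # hypothetical name
spec:
  forProvider:
    asymmetricEncryptionKeyId: example-key-id  # hypothetical key ID
    role: kms.keys.encrypterDecrypter          # hypothetical role
    members:
    - serviceAccount:example-sa-id             # hypothetical member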
+ type: string
+ observedGeneration:
+ description: |-
+ ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ type: integer
+ reason:
+ description: A Reason for this condition's last transition from
+ one status to another.
+ type: string
+ status:
+ description: Status of this condition; is it currently True,
+ False, or Unknown?
+ type: string
+ type:
+ description: |-
+ Type of this condition. At most one of each condition type may apply to
+ a resource at any point in time.
+ type: string
+ required:
+ - lastTransitionTime
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ observedGeneration:
+ description: |-
+ ObservedGeneration is the latest metadata.generation
+ which resulted in either a ready state, or stalled due to error
+ it can not recover from without human intervention.
+ format: int64
+ type: integer
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricencryptionkeys.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricencryptionkeys.yaml
new file mode 100644
index 0000000..408fcf4
--- /dev/null
+++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricencryptionkeys.yaml
@@ -0,0 +1,553 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: asymmetricencryptionkeys.kms.yandex-cloud.upjet.crossplane.io
+spec:
+ group: kms.yandex-cloud.upjet.crossplane.io
+ names:
+ categories:
+ - crossplane
+ - managed
+ - yandex-cloud
+ kind: AsymmetricEncryptionKey
+ listKind: AsymmetricEncryptionKeyList
+ plural: asymmetricencryptionkeys
+ singular: asymmetricencryptionkey
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=='Synced')].status
+ name: SYNCED
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: READY
+ type: string
+ - jsonPath: .metadata.annotations.crossplane\.io/external-name
+ name: EXTERNAL-NAME
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: AsymmetricEncryptionKey is the Schema for the AsymmetricEncryptionKeys
+ API. Creates a Yandex KMS asymmetric encryption key that can be used for
+ cryptographic operations.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AsymmetricEncryptionKeySpec defines the desired state of + AsymmetricEncryptionKey + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deletionProtection: + type: boolean + description: + description: An optional description of the key. + type: string + encryptionAlgorithm: + description: Encryption algorithm to be used with a new key. The + default value is RSA_2048_ENC_OAEP_SHA_256. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: A set of key/value label pairs to assign to the key.
+ type: object
+ x-kubernetes-map-type: granular
+ name:
+ description: Name of the key.
+ type: string
+ type: object
+ initProvider:
+ description: |-
+ THIS IS A BETA FIELD. It will be honored
+ unless the Management Policies feature flag is disabled.
+ InitProvider holds the same fields as ForProvider, with the exception
+ of Identifier and other resource reference fields. The fields that are
+ in InitProvider are merged into ForProvider when the resource is created.
+ The same fields are also added to the terraform ignore_changes hook, to
+ avoid updating them after creation. This is useful for fields that are
+ required on creation, but we do not desire to update them after creation,
+ for example because an external controller is managing them, like an
+ autoscaler.
+ properties:
+ deletionProtection:
+ type: boolean
+ description:
+ description: An optional description of the key.
+ type: string
+ encryptionAlgorithm:
+ description: Encryption algorithm to be used with a new key. The
+ default value is RSA_2048_ENC_OAEP_SHA_256.
+ type: string
+ folderId:
+ description: The ID of the folder that the resource belongs to.
+ If it is not provided, the default provider folder is used.
+ type: string
+ folderIdRef:
+ description: Reference to a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ folderIdSelector:
+ description: Selector for a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the key. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the key. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ metadata:
+ description: Metadata is the metadata for connection secret.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations are the annotations to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.annotations".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Labels are the labels/tags to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.labels".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ type:
+ description: |-
+ Type is the SecretType for the connection secret.
+ - Only valid for Kubernetes Secret Stores.
+ type: string
+ type: object
+ name:
+ description: Name is the name of the connection secret.
+ type: string
+ required:
+ - name
+ type: object
+ writeConnectionSecretToRef:
+ description: |-
+ WriteConnectionSecretToReference specifies the namespace and name of a
+ Secret to which any connection details for this managed resource should
+ be written. Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ This field is planned to be replaced in a future release in favor of
+ PublishConnectionDetailsTo. Currently, both could be set independently
+ and connection details would be published to both without affecting
+ each other.
+ properties:
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ required:
+ - forProvider
+ type: object
+ status:
+ description: AsymmetricEncryptionKeyStatus defines the observed state
+ of AsymmetricEncryptionKey.
+ properties:
+ atProvider:
+ properties:
+ createdAt:
+ description: Creation timestamp of the key.
+ type: string
+ deletionProtection:
+ type: boolean
+ description:
+ description: An optional description of the key.
+ type: string
+ encryptionAlgorithm:
+ description: Encryption algorithm to be used with a new key. The
+ default value is RSA_2048_ENC_OAEP_SHA_256.
+ type: string
+ folderId:
+ description: The ID of the folder that the resource belongs to.
+ If it is not provided, the default provider folder is used.
+ type: string
+ id:
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: A set of key/value label pairs to assign to the key.
+ type: object
+ x-kubernetes-map-type: granular
+ name:
+ description: Name of the key.
+ type: string
+ status:
+ description: The status of the key.
+ type: string
+ type: object
+ conditions:
+ description: Conditions of the resource.
+ items:
+ description: A Condition that may apply to a resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ LastTransitionTime is the last time this condition transitioned from one
+ status to another.
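# A minimal sketch of a manifest for the AsymmetricEncryptionKey kind defined
# above; the name, description, and folder reference are hypothetical
# placeholders, and encryptionAlgorithm simply restates the schema default.
apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: AsymmetricEncryptionKey
metadata:
  name: example-encryption-key                     # hypothetical name
spec:
  forProvider:
    name: example-encryption-key
    description: example asymmetric encryption key # hypothetical
    encryptionAlgorithm: RSA_2048_ENC_OAEP_SHA_256 # schema default
    deletionProtection: false
    folderIdRef:
      name: example-folder                         # hypothetical Folder object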
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A Message containing details about this condition's last transition from
+ one status to another, if any.
+ type: string
+ observedGeneration:
+ description: |-
+ ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ type: integer
+ reason:
+ description: A Reason for this condition's last transition from
+ one status to another.
+ type: string
+ status:
+ description: Status of this condition; is it currently True,
+ False, or Unknown?
+ type: string
+ type:
+ description: |-
+ Type of this condition. At most one of each condition type may apply to
+ a resource at any point in time.
+ type: string
+ required:
+ - lastTransitionTime
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ observedGeneration:
+ description: |-
+ ObservedGeneration is the latest metadata.generation
+ which resulted in either a ready state, or stalled due to error
+ it can not recover from without human intervention.
+ format: int64
+ type: integer
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricsignaturekeyiambindings.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricsignaturekeyiambindings.yaml
new file mode 100644
index 0000000..30986b4
--- /dev/null
+++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricsignaturekeyiambindings.yaml
@@ -0,0 +1,689 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: asymmetricsignaturekeyiambindings.kms.yandex-cloud.upjet.crossplane.io
+spec:
+ group: kms.yandex-cloud.upjet.crossplane.io
+ names:
+ categories:
+ - crossplane
+ - managed
+ - yandex-cloud
+ kind: AsymmetricSignatureKeyIAMBinding
+ listKind: AsymmetricSignatureKeyIAMBindingList
+ plural: asymmetricsignaturekeyiambindings
+ singular: asymmetricsignaturekeyiambinding
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=='Synced')].status
+ name: SYNCED
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: READY
+ type: string
+ - jsonPath: .metadata.annotations.crossplane\.io/external-name
+ name: EXTERNAL-NAME
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: AsymmetricSignatureKeyIAMBinding is the Schema for the AsymmetricSignatureKeyIAMBindings
+ API. Allows management of a single IAM binding for a Yandex Key Management
+ Service Asymmetric Signature Key.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AsymmetricSignatureKeyIAMBindingSpec defines the desired + state of AsymmetricSignatureKeyIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + asymmetricSignatureKeyId: + description: The Yandex Key Management Service Asymmetric Signature + Key ID to apply a binding to. + type: string + asymmetricSignatureKeyIdRef: + description: Reference to a AsymmetricSignatureKey to populate + asymmetricSignatureKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + asymmetricSignatureKeyIdSelector: + description: Selector for a AsymmetricSignatureKey to populate + asymmetricSignatureKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. 
Each entry can have one of the following values:'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ role:
+ description: The role that should be applied. See roles.
+ type: string
+ serviceAccountRef:
+ description: References to ServiceAccount in iam to populate members.
+ items:
+ description: A Reference to a named object.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ serviceAccountSelector:
+ description: Selector for a list of ServiceAccount in iam to populate
+ members.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ sleepAfter:
+ type: number
+ type: object
+ initProvider:
+ description: |-
+ THIS IS A BETA FIELD. It will be honored
+ unless the Management Policies feature flag is disabled.
+ InitProvider holds the same fields as ForProvider, with the exception
+ of Identifier and other resource reference fields. The fields that are
+ in InitProvider are merged into ForProvider when the resource is created.
+ The same fields are also added to the terraform ignore_changes hook, to
+ avoid updating them after creation. This is useful for fields that are
+ required on creation, but we do not desire to update them after creation,
+ for example because an external controller is managing them, like an
+ autoscaler.
+ properties:
+ asymmetricSignatureKeyId:
+ description: The Yandex Key Management Service Asymmetric Signature
+ Key ID to apply a binding to.
+ type: string
+ asymmetricSignatureKeyIdRef:
+ description: Reference to a AsymmetricSignatureKey to populate
+ asymmetricSignatureKeyId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + asymmetricSignatureKeyIdSelector: + description: Selector for a AsymmetricSignatureKey to populate + asymmetricSignatureKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ publishConnectionDetailsTo:
+ description: |-
+ PublishConnectionDetailsTo specifies the connection secret config which
+ contains a name, metadata and a reference to secret store config to
+ which any connection details for this managed resource should be written.
+ Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ properties:
+ configRef:
+ default:
+ name: default
+ description: |-
+ SecretStoreConfigRef specifies which secret store config should be used
+ for this ConnectionSecret.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ metadata:
+ description: Metadata is the metadata for connection secret.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations are the annotations to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.annotations".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Labels are the labels/tags to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.labels".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ type:
+ description: |-
+ Type is the SecretType for the connection secret.
+ - Only valid for Kubernetes Secret Stores.
+ type: string
+ type: object
+ name:
+ description: Name is the name of the connection secret.
+ type: string
+ required:
+ - name
+ type: object
+ writeConnectionSecretToRef:
+ description: |-
+ WriteConnectionSecretToReference specifies the namespace and name of a
+ Secret to which any connection details for this managed resource should
+ be written. Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ This field is planned to be replaced in a future release in favor of
+ PublishConnectionDetailsTo. Currently, both could be set independently
+ and connection details would be published to both without affecting
+ each other.
+ properties:
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: AsymmetricSignatureKeyIAMBindingStatus defines the observed + state of AsymmetricSignatureKeyIAMBinding. + properties: + atProvider: + properties: + asymmetricSignatureKeyId: + description: The Yandex Key Management Service Asymmetric Signature + Key ID to apply a binding to. + type: string + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
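# A minimal sketch of a manifest for the AsymmetricSignatureKeyIAMBinding kind
# defined above, satisfying the CEL rule that requires spec.forProvider.role;
# the key reference, role, and member values are hypothetical placeholders.
apiVersion: kms.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: AsymmetricSignatureKeyIAMBinding
metadata:
  name: example-signature-binding    # hypothetical name
spec:
  forProvider:
    asymmetricSignatureKeyIdRef:
      name: example-signature-key    # hypothetical AsymmetricSignatureKey object
    role: kms.keys.signerVerifier    # hypothetical role
    members:
    - serviceAccount:example-sa-id   # hypothetical member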
+ format: int64
+ type: integer
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricsignaturekeys.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricsignaturekeys.yaml
new file mode 100644
index 0000000..19e3a6a
--- /dev/null
+++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_asymmetricsignaturekeys.yaml
@@ -0,0 +1,552 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: asymmetricsignaturekeys.kms.yandex-cloud.upjet.crossplane.io
+spec:
+ group: kms.yandex-cloud.upjet.crossplane.io
+ names:
+ categories:
+ - crossplane
+ - managed
+ - yandex-cloud
+ kind: AsymmetricSignatureKey
+ listKind: AsymmetricSignatureKeyList
+ plural: asymmetricsignaturekeys
+ singular: asymmetricsignaturekey
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=='Synced')].status
+ name: SYNCED
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: READY
+ type: string
+ - jsonPath: .metadata.annotations.crossplane\.io/external-name
+ name: EXTERNAL-NAME
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: AsymmetricSignatureKey is the Schema for the AsymmetricSignatureKeys
+ API. Creates a Yandex KMS asymmetric signature key that can be used for
+ cryptographic operations.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AsymmetricSignatureKeySpec defines the desired state of AsymmetricSignatureKey
+ properties:
+ deletionPolicy:
+ default: Delete
+ description: |-
+ DeletionPolicy specifies what will happen to the underlying external
+ when this managed resource is deleted - either "Delete" or "Orphan" the
+ external resource.
+ This field is planned to be deprecated in favor of the ManagementPolicies
+ field in a future release. Currently, both could be set independently and
+ non-default values would be honored if the feature flag is enabled.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+ enum:
+ - Orphan
+ - Delete
+ type: string
+ forProvider:
+ properties:
+ deletionProtection:
+ type: boolean
+ description:
+ description: An optional description of the key.
+ type: string
+ folderId:
+ description: The ID of the folder that the resource belongs to.
+ If it is not provided, the default provider folder is used.
+ type: string
+ folderIdRef:
+ description: Reference to a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ folderIdSelector:
+ description: Selector for a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: A set of key/value label pairs to assign to the key.
+ type: object
+ x-kubernetes-map-type: granular
+ name:
+ description: Name of the key.
+ type: string
+ signatureAlgorithm:
+ description: Signature algorithm to be used with a new key. The
+ default value is RSA_2048_SIGN_PSS_SHA_256.
+ type: string
+ type: object
+ initProvider:
+ description: |-
+ THIS IS A BETA FIELD. It will be honored
+ unless the Management Policies feature flag is disabled.
+ InitProvider holds the same fields as ForProvider, with the exception
+ of Identifier and other resource reference fields. The fields that are
+ in InitProvider are merged into ForProvider when the resource is created.
+ The same fields are also added to the terraform ignore_changes hook, to
+ avoid updating them after creation. This is useful for fields that are
+ required on creation, but we do not desire to update them after creation,
+ for example because an external controller is managing them, like an
+ autoscaler.
+ properties:
+ deletionProtection:
+ type: boolean
+ description:
+ description: An optional description of the key.
+ type: string
+ folderId:
+ description: The ID of the folder that the resource belongs to.
+ If it is not provided, the default provider folder is used.
+ type: string
+ folderIdRef:
+ description: Reference to a Folder in resourcemanager to populate
+ folderId.
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the key. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the key. + type: string + signatureAlgorithm: + description: Signature algorithm to be used with a new key. The + default value is RSA_2048_SIGN_PSS_SHA_256. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum:
+ - Observe
+ - Create
+ - Update
+ - Delete
+ - LateInitialize
+ - '*'
+ type: string
+ type: array
+ providerConfigRef:
+ default:
+ name: default
+ description: |-
+ ProviderConfigReference specifies how the provider that will be used to
+ create, observe, update, and delete this managed resource should be
+ configured.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ publishConnectionDetailsTo:
+ description: |-
+ PublishConnectionDetailsTo specifies the connection secret config which
+ contains a name, metadata and a reference to secret store config to
+ which any connection details for this managed resource should be written.
+ Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ properties:
+ configRef:
+ default:
+ name: default
+ description: |-
+ SecretStoreConfigRef specifies which secret store config should be used
+ for this ConnectionSecret.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ metadata:
+ description: Metadata is the metadata for connection secret.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations are the annotations to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.annotations".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Labels are the labels/tags to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.labels".
+ - It is up to the Secret Store implementation for other store types.
+ type: object
+ type:
+ description: |-
+ Type is the SecretType for the connection secret.
+ - Only valid for Kubernetes Secret Stores.
+ type: string
+ type: object
+ name:
+ description: Name is the name of the connection secret.
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AsymmetricSignatureKeyStatus defines the observed state of + AsymmetricSignatureKey. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the key. + type: string + deletionProtection: + type: boolean + description: + description: An optional description of the key. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the key. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the key. + type: string + signatureAlgorithm: + description: Signature algorithm to be used with a new key. The + default value is RSA_2048_SIGN_PSS_SHA_256. + type: string + status: + description: The status of the key. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_secretciphertexts.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_secretciphertexts.yaml
new file mode 100644
index 0000000..f1e7d5a
--- /dev/null
+++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_secretciphertexts.yaml
@@ -0,0 +1,544 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: secretciphertexts.kms.yandex-cloud.upjet.crossplane.io
+spec:
+  group: kms.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: SecretCiphertext
+    listKind: SecretCiphertextList
+    plural: secretciphertexts
+    singular: secretciphertext
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: SecretCiphertext is the Schema for the SecretCiphertexts API.
+          Encrypts a given plaintext with the specified Yandex KMS key and provides
+          access to the ciphertext.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SecretCiphertextSpec defines the desired state of SecretCiphertext
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  aadContext:
+                    description: Additional authenticated data (AAD context), optional.
+                      If specified, this data will be required for decryption with
+                      the SymmetricDecryptRequest
+                    type: string
+                  keyId:
+                    description: ID of the symmetric KMS key to use for encryption.
+                    type: string
+                  keyIdRef:
+                    description: Reference to a SymmetricKey to populate keyId.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a SymmetricKey to populate keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + plaintextSecretRef: + description: Plaintext to be encrypted. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aadContext: + description: Additional authenticated data (AAD context), optional. + If specified, this data will be required for decryption with + the SymmetricDecryptRequest + type: string + keyId: + description: ID of the symmetric KMS key to use for encryption. + type: string + keyIdRef: + description: Reference to a SymmetricKey to populate keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a SymmetricKey to populate keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + plaintextSecretRef: + description: Plaintext to be encrypted. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - plaintextSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.plaintextSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.plaintextSecretRef)' + status: + description: SecretCiphertextStatus defines the observed state of SecretCiphertext. + properties: + atProvider: + properties: + aadContext: + description: Additional authenticated data (AAD context), optional. + If specified, this data will be required for decryption with + the SymmetricDecryptRequest + type: string + ciphertext: + description: Resulting ciphertext, encoded with "standard" base64 + alphabet as defined in RFC 4648 section 4 + type: string + id: + description: an identifier for the resource with format {key_id}/{ciphertext} + type: string + keyId: + description: ID of the symmetric KMS key to use for encryption. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_symmetrickeyiambindings.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_symmetrickeyiambindings.yaml
new file mode 100644
index 0000000..dff3a8c
--- /dev/null
+++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_symmetrickeyiambindings.yaml
@@ -0,0 +1,684 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: symmetrickeyiambindings.kms.yandex-cloud.upjet.crossplane.io
+spec:
+  group: kms.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: SymmetricKeyIAMBinding
+    listKind: SymmetricKeyIAMBindingList
+    plural: symmetrickeyiambindings
+    singular: symmetrickeyiambinding
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: SymmetricKeyIAMBinding is the Schema for the SymmetricKeyIAMBindings
+          API. Allows management of a single IAM binding for a Yandex Key Management
+          Service Symmetric Key.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SymmetricKeyIAMBindingSpec defines the desired state of SymmetricKeyIAMBinding
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  members:
+                    description: 'Identities that will be granted the privilege in
+                      role. Each entry can have one of the following values:'
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: set
+                  role:
+                    description: The role that should be applied. See roles.
+                    type: string
+                  serviceAccountRef:
+                    description: References to ServiceAccount in iam to populate members.
+                    items:
+                      description: A Reference to a named object.
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + symmetricKeyId: + description: The Yandex Key Management Service Symmetric Key ID + to apply a binding to. + type: string + symmetricKeyIdRef: + description: Reference to a SymmetricKey to populate symmetricKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + symmetricKeyIdSelector: + description: Selector for a SymmetricKey to populate symmetricKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + symmetricKeyId: + description: The Yandex Key Management Service Symmetric Key ID + to apply a binding to. + type: string + symmetricKeyIdRef: + description: Reference to a SymmetricKey to populate symmetricKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + symmetricKeyIdSelector: + description: Selector for a SymmetricKey to populate symmetricKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: SymmetricKeyIAMBindingStatus defines the observed state of + SymmetricKeyIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + symmetricKeyId: + description: The Yandex Key Management Service Symmetric Key ID + to apply a binding to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to
+                        a resource at any point in time.
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+              observedGeneration:
+                description: |-
+                  ObservedGeneration is the latest metadata.generation
+                  which resulted in either a ready state, or stalled due to error
+                  it can not recover from without human intervention.
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/kms.yandex-cloud.upjet.crossplane.io_symmetrickeys.yaml b/package/crds/kms.yandex-cloud.upjet.crossplane.io_symmetrickeys.yaml
new file mode 100644
index 0000000..309bdd8
--- /dev/null
+++ b/package/crds/kms.yandex-cloud.upjet.crossplane.io_symmetrickeys.yaml
@@ -0,0 +1,565 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: symmetrickeys.kms.yandex-cloud.upjet.crossplane.io
+spec:
+  group: kms.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: SymmetricKey
+    listKind: SymmetricKeyList
+    plural: symmetrickeys
+    singular: symmetrickey
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: SymmetricKey is the Schema for the SymmetricKeys API. Creates
+          a Yandex KMS symmetric key that can be used for cryptographic operations.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SymmetricKeySpec defines the desired state of SymmetricKey
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + defaultAlgorithm: + description: Encryption algorithm to be used with a new key version, + generated with the next rotation. The default value is AES_128. + type: string + deletionProtection: + type: boolean + description: + description: An optional description of the key. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the key. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the key. + type: string + rotationPeriod: + description: Interval between automatic rotations. To disable + automatic rotation, omit this parameter. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + defaultAlgorithm: + description: Encryption algorithm to be used with a new key version, + generated with the next rotation. The default value is AES_128. + type: string + deletionProtection: + type: boolean + description: + description: An optional description of the key. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the key. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the key. + type: string + rotationPeriod: + description: Interval between automatic rotations. To disable + automatic rotation, omit this parameter. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SymmetricKeyStatus defines the observed state of SymmetricKey. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the key. + type: string + defaultAlgorithm: + description: Encryption algorithm to be used with a new key version, + generated with the next rotation. The default value is AES_128. + type: string + deletionProtection: + type: boolean + description: + description: An optional description of the key. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the key. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the key. + type: string + rotatedAt: + description: Last rotation timestamp of the key. + type: string + rotationPeriod: + description: Interval between automatic rotations. To disable + automatic rotation, omit this parameter. + type: string + status: + description: The status of the key. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kubernetes.yandex-cloud.upjet.crossplane.io_clusters.yaml b/package/crds/kubernetes.yandex-cloud.upjet.crossplane.io_clusters.yaml new file mode 100644 index 0000000..135931e --- /dev/null +++ b/package/crds/kubernetes.yandex-cloud.upjet.crossplane.io_clusters.yaml @@ -0,0 +1,2943 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusters.kubernetes.yandex-cloud.upjet.crossplane.io +spec: + group: kubernetes.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Allows management + of Yandex Kubernetes Cluster. For more information, see + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external resource + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterIpv4Range: + description: CIDR block. IP range for allocating pod addresses. + It should not overlap with any subnet in the network the Kubernetes + cluster is located in. Static routes will be set up for these CIDR + blocks in node subnets. + type: string + clusterIpv6Range: + description: Identical to cluster_ipv4_range but for IPv6 protocol. + type: string + description: + description: A description of the Kubernetes cluster. + type: string + folderId: + description: The ID of the folder that the Kubernetes cluster + belongs to. If it is not provided, the default provider folder + is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved.
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsProvider: + description: cluster KMS provider parameters. + items: + properties: + keyId: + description: KMS key ID. + type: string + keyIdRef: + description: Reference to a SymmetricKey in kms to populate + keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a SymmetricKey in kms to populate + keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Kubernetes + cluster. + type: object + x-kubernetes-map-type: granular + master: + description: Kubernetes master configuration options. The structure + is documented below. + items: + properties: + etcdClusterSize: + type: number + externalV6Address: + type: string + maintenancePolicy: + description: (Computed) Maintenance policy for Kubernetes + master. If policy is omitted, automatic revision upgrades + of the kubernetes master are enabled and could happen + at any time. Revision upgrades are performed only within + the same minor version, e.g. 1.13. Minor version upgrades + (e.g. 1.13->1.14) should be performed manually. The structure + is documented below. + items: + properties: + autoUpgrade: + description: Boolean flag that specifies if master + can be upgraded automatically. When omitted, default + value is TRUE. 
+ type: boolean + maintenanceWindow: + description: (Computed) This structure specifies the maintenance + window during which updates to the master are allowed. When + omitted, it defaults to any time. To specify a time-of-day + interval for all days, provide one element with two fields + set, start_time and duration. Please see zonal_cluster_resource_name + config example. + items: + properties: + day: + type: string + duration: + type: string + startTime: + type: string + type: object + type: array + type: object + type: array + masterLocation: + description: Array of locations (zone and subnet) for the cluster + master's instances. Cannot be used together with zonal + or regional. Currently supports either one instance (for a zonal + master) or three instances of master_location. Can be + updated in place. When creating a regional cluster (three + master instances), its region will be evaluated automatically + by the backend. The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there is only one subnet in the specified zone, an + address in this subnet will be allocated. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone. + type: string + type: object + type: array + masterLogging: + description: Master Logging options. The structure is documented + below.
+ items: + properties: + auditEnabled: + description: Boolean flag that specifies if kube-apiserver + audit logs should be sent to Yandex Cloud Logging. + type: boolean + clusterAutoscalerEnabled: + description: Boolean flag that specifies if cluster-autoscaler + logs should be sent to Yandex Cloud Logging. + type: boolean + enabled: + description: Boolean flag that specifies if master + component logs should be sent to Yandex Cloud Logging. + The exact components that will send their logs must + be configured via the options described below. + type: boolean + eventsEnabled: + description: Boolean flag that specifies if Kubernetes + cluster events should be sent to Yandex Cloud Logging. + type: boolean + folderId: + description: ID of the folder whose default Log group + should be used to collect logs. + type: string + kubeApiserverEnabled: + description: Boolean flag that specifies if kube-apiserver + logs should be sent to Yandex Cloud Logging. + type: boolean + logGroupId: + description: ID of the Yandex Cloud Logging Log group. + type: string + logGroupIdRef: + description: Reference to a Group in logging to populate + logGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupIdSelector: + description: Selector for a Group in logging to populate + logGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicIp: + description: (Computed) Boolean flag. When true, the Kubernetes + master will have a visible IPv4 address. + type: boolean + regional: + description: Initialize parameters for Regional Master (highly + available master). The structure is documented below.
+ items: + properties: + location: + description: Array of locations where master instances + will be allocated. The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there is only one subnet in the specified zone, + an address in this subnet will be allocated. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to + populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to + populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone. + type: string + zoneRef: + description: Reference to a Subnet in vpc to + populate zone. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile.
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneSelector: + description: Selector for a Subnet in vpc to + populate zone. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + region: + description: Name of availability region (e.g. "ru-central1"), + where master instances will be allocated. + type: string + type: object + type: array + securityGroupIds: + description: List of security group IDs to which the Kubernetes + cluster belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc + to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: (Computed) Version of Kubernetes that will + be used for master. + type: string + zonal: + description: Initialize parameters for Zonal Master (single + node master). The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there is only one subnet in the specified zone, an + address in this subnet will be allocated. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone. + type: string + zoneRef: + description: Reference to a Subnet in vpc to populate + zone. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneSelector: + description: Selector for a Subnet in vpc to populate + zone. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + name: + description: Name of a specific Kubernetes cluster. + type: string + networkId: + description: The ID of the cluster network. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + networkImplementation: + description: Network Implementation options. The structure is + documented below. + items: + properties: + cilium: + description: Cilium network implementation configuration. + No options exist. + items: + type: object + type: array + type: object + type: array + networkPolicyProvider: + description: 'Network policy provider for the cluster. Possible + values: CALICO.' + type: string + nodeIpv4CidrMaskSize: + description: Size of the masks that are assigned to each node + in the cluster. Effectively limits maximum number of pods for + each node. + type: number + nodeServiceAccountId: + description: Service account to be used by the worker nodes of + the Kubernetes cluster to access Container Registry or to push + node logs and metrics. + type: string + nodeServiceAccountIdRef: + description: Reference to a SecurityGroup in vpc to populate nodeServiceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nodeServiceAccountIdSelector: + description: Selector for a SecurityGroup in vpc to populate nodeServiceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + releaseChannel: + description: Cluster release channel. + type: string + serviceAccountId: + description: Service account to be used for provisioning Compute + Cloud and VPC resources for the Kubernetes cluster. The selected service + account should have the edit role on the folder where the Kubernetes + cluster will be located and on the folder where the selected network + resides. + type: string + serviceAccountIdRef: + description: Reference to a SecurityGroup in vpc to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a SecurityGroup in vpc to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceIpv4Range: + description: CIDR block. IP range from which Kubernetes service + cluster IP addresses will be allocated. It should not overlap + with any subnet in the network the Kubernetes cluster is located + in. + type: string + serviceIpv6Range: + description: Identical to service_ipv4_range but for IPv6 protocol. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation.
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller, like an + autoscaler, is managing them. + properties: + clusterIpv4Range: + description: CIDR block. IP range for allocating pod addresses. + It should not overlap with any subnet in the network the Kubernetes + cluster is located in. Static routes will be set up for these CIDR + blocks in node subnets. + type: string + clusterIpv6Range: + description: Identical to cluster_ipv4_range but for IPv6 protocol. + type: string + description: + description: A description of the Kubernetes cluster. + type: string + folderId: + description: The ID of the folder that the Kubernetes cluster + belongs to. If it is not provided, the default provider folder + is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsProvider: + description: cluster KMS provider parameters. + items: + properties: + keyId: + description: KMS key ID. + type: string + keyIdRef: + description: Reference to a SymmetricKey in kms to populate + keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required.
The default + is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a SymmetricKey in kms to populate + keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Kubernetes + cluster. + type: object + x-kubernetes-map-type: granular + master: + description: Kubernetes master configuration options. The structure + is documented below. + items: + properties: + etcdClusterSize: + type: number + externalV6Address: + type: string + maintenancePolicy: + description: (Computed) Maintenance policy for Kubernetes + master. If policy is omitted, automatic revision upgrades + of the kubernetes master are enabled and could happen + at any time. Revision upgrades are performed only within + the same minor version, e.g. 1.13. Minor version upgrades + (e.g. 1.13->1.14) should be performed manually. The structure + is documented below. + items: + properties: + autoUpgrade: + description: Boolean flag that specifies if master + can be upgraded automatically. When omitted, default + value is TRUE. + type: boolean + maintenanceWindow: + description: (Computed) This structure specifies the maintenance + window during which updates to the master are allowed. When + omitted, it defaults to any time. To specify a time-of-day + interval for all days, provide one element with two fields + set, start_time and duration. Please see zonal_cluster_resource_name + config example. + items: + properties: + day: + type: string + duration: + type: string + startTime: + type: string + type: object + type: array + type: object + type: array + masterLocation: + description: Array of locations (zone and subnet) for the cluster + master's instances. Cannot be used together with zonal + or regional. Currently supports either one instance (for a zonal + master) or three instances of master_location.
Can be + updated in place. When creating a regional cluster (three + master instances), its region will be evaluated automatically + by the backend. The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there is only one subnet in the specified zone, an + address in this subnet will be allocated. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone. + type: string + type: object + type: array + masterLogging: + description: Master Logging options. The structure is documented + below. + items: + properties: + auditEnabled: + description: Boolean flag that specifies if kube-apiserver + audit logs should be sent to Yandex Cloud Logging. + type: boolean + clusterAutoscalerEnabled: + description: Boolean flag that specifies if cluster-autoscaler + logs should be sent to Yandex Cloud Logging. + type: boolean + enabled: + description: Boolean flag that specifies if master + component logs should be sent to Yandex Cloud Logging. + The exact components that will send their logs must + be configured via the options described below. + type: boolean + eventsEnabled: + description: Boolean flag that specifies if Kubernetes + cluster events should be sent to Yandex Cloud Logging. + type: boolean + folderId: + description: ID of the folder whose default Log group + should be used to collect logs.
+ type: string + kubeApiserverEnabled: + description: Boolean flag that specifies if kube-apiserver + logs should be sent to Yandex Cloud Logging. + type: boolean + logGroupId: + description: ID of the Yandex Cloud Logging Log group. + type: string + logGroupIdRef: + description: Reference to a Group in logging to populate + logGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupIdSelector: + description: Selector for a Group in logging to populate + logGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicIp: + description: (Computed) Boolean flag. When true, the Kubernetes + master will have a visible IPv4 address. + type: boolean + regional: + description: Initialize parameters for Regional Master (highly + available master). The structure is documented below. + items: + properties: + location: + description: Array of locations where master instances + will be allocated. The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there is only one subnet in the specified zone, + an address in this subnet will be allocated. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to + populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to + populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone. + type: string + zoneRef: + description: Reference to a Subnet in vpc to + populate zone. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneSelector: + description: Selector for a Subnet in vpc to + populate zone. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + region: + description: Name of availability region (e.g. "ru-central1"), + where master instances will be allocated. + type: string + type: object + type: array + securityGroupIds: + description: List of security group IDs to which the Kubernetes + cluster belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc + to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: (Computed) Version of Kubernetes that will + be used for master. + type: string + zonal: + description: Initialize parameters for Zonal Master (single + node master). The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there is only one subnet in the specified zone, an + address in this subnet will be allocated.
+ type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone. + type: string + zoneRef: + description: Reference to a Subnet in vpc to populate + zone. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneSelector: + description: Selector for a Subnet in vpc to populate + zone. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + name: + description: Name of a specific Kubernetes cluster. + type: string + networkId: + description: The ID of the cluster network. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + networkImplementation: + description: Network Implementation options. The structure is + documented below. + items: + properties: + cilium: + description: Cilium network implementation configuration. + No options exist. 
+ items: + type: object + type: array + type: object + type: array + networkPolicyProvider: + description: 'Network policy provider for the cluster. Possible + values: CALICO.' + type: string + nodeIpv4CidrMaskSize: + description: Size of the masks that are assigned to each node + in the cluster. Effectively limits maximum number of pods for + each node. + type: number + nodeServiceAccountId: + description: Service account to be used by the worker nodes of + the Kubernetes cluster to access Container Registry or to push + node logs and metrics. + type: string + nodeServiceAccountIdRef: + description: Reference to a SecurityGroup in vpc to populate nodeServiceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nodeServiceAccountIdSelector: + description: Selector for a SecurityGroup in vpc to populate nodeServiceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + releaseChannel: + description: Cluster release channel. + type: string + serviceAccountId: + description: Service account to be used for provisioning Compute + Cloud and VPC resources for Kubernetes cluster. Selected service + account should have edit role on the folder where the Kubernetes + cluster will be located and on the folder where selected network + resides. + type: string + serviceAccountIdRef: + description: Reference to a SecurityGroup in vpc to populate serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a SecurityGroup in vpc to populate serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceIpv4Range: + description: CIDR block. IP range Kubernetes service Kubernetes + cluster IP addresses will be allocated from. It should not overlap + with any subnet in the network the Kubernetes cluster located + in. + type: string + serviceIpv6Range: + description: Identical to service_ipv4_range but for IPv6 protocol. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.master is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.master) + || (has(self.initProvider) && has(self.initProvider.master))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + clusterIpv4Range: + description: CIDR block. IP range for allocating pod addresses. + It should not overlap with any subnet in the network the Kubernetes + cluster located in. Static routes will be set up for this CIDR + blocks in node subnets. + type: string + clusterIpv6Range: + description: Identical to cluster_ipv4_range but for IPv6 protocol. + type: string + createdAt: + description: (Computed) The Kubernetes cluster creation timestamp. + type: string + description: + description: A description of the Kubernetes cluster. + type: string + folderId: + description: The ID of the folder that the Kubernetes cluster + belongs to. If it is not provided, the default provider folder + is used. + type: string + health: + description: (Computed) Health of the Kubernetes cluster. + type: string + id: + description: (Computed) ID of a new Kubernetes cluster. + type: string + kmsProvider: + description: cluster KMS provider parameters. + items: + properties: + keyId: + description: KMS key ID. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Kubernetes + cluster. + type: object + x-kubernetes-map-type: granular + logGroupId: + description: Log group where cluster stores cluster system logs, + like audit, events, or controlplane logs. + type: string + master: + description: Kubernetes master configuration options. The structure + is documented below. + items: + properties: + clusterCaCertificate: + description: (Computed) PEM-encoded public certificate that + is the root of trust for the Kubernetes cluster. + type: string + etcdClusterSize: + type: number + externalV4Address: + description: (Computed) An IPv4 external network address + that is assigned to the master. + type: string + externalV4Endpoint: + description: (Computed) External endpoint that can be used + to access Kubernetes cluster API from the internet (outside + of the cloud). + type: string + externalV6Address: + type: string + externalV6Endpoint: + type: string + internalV4Address: + description: (Computed) An IPv4 internal network address + that is assigned to the master. + type: string + internalV4Endpoint: + description: (Computed) Internal endpoint that can be used + to connect to the master from cloud networks. + type: string + maintenancePolicy: + description: (Computed) Maintenance policy for Kubernetes + master. 
If policy is omitted, automatic revision upgrades + of the kubernetes master are enabled and could happen + at any time. Revision upgrades are performed only within + the same minor version, e.g. 1.13. Minor version upgrades + (e.g. 1.13->1.14) should be performed manually. The structure + is documented below. + items: + properties: + autoUpgrade: + description: Boolean flag that specifies if master + can be upgraded automatically. When omitted, default + value is TRUE. + type: boolean + maintenanceWindow: + description: (Computed) This structure specifies maintenance + window, when update for master is allowed. When + omitted, it defaults to any time. To specify time + of day interval, for all days, one element should + be provided, with two fields set, start_time and + duration. Please see zonal_cluster_resource_name + config example. + items: + properties: + day: + type: string + duration: + type: string + startTime: + type: string + type: object + type: array + type: object + type: array + masterLocation: + description: Cluster master's instances locations array + (zone and subnet). Cannot be used together with zonal + or regional. Currently, supports either one, for zonal + master, or three instances of master_location. Can be + updated inplace. When creating regional cluster (three + master instances), its region will be evaluated automatically + by backend. The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there only one subnet in specified zone, an + address in this subnet will be allocated. + type: string + zone: + description: ID of the availability zone. + type: string + type: object + type: array + masterLogging: + description: Master Logging options. The structure is documented + below. + items: + properties: + auditEnabled: + description: Boolean flag that specifies if kube-apiserver + audit logs should be sent to Yandex Cloud Logging. + type: boolean + clusterAutoscalerEnabled: + description: Boolean flag that specifies if cluster-autoscaler + logs should be sent to Yandex Cloud Logging. + type: boolean + enabled: + description: Boolean flag that specifies if master + components logs should be sent to Yandex Cloud Logging. + The exact components that will send their logs must + be configured via the options described below. + type: boolean + eventsEnabled: + description: Boolean flag that specifies if kubernetes + cluster events should be sent to Yandex Cloud Logging. + type: boolean + folderId: + description: ID of the folder default Log group of + which should be used to collect logs. + type: string + kubeApiserverEnabled: + description: Boolean flag that specifies if kube-apiserver + logs should be sent to Yandex Cloud Logging. + type: boolean + logGroupId: + description: ID of the Yandex Cloud Logging Log group. + type: string + type: object + type: array + publicIp: + description: (Computed) Boolean flag. When true, Kubernetes + master will have visible ipv4 address. + type: boolean + regional: + description: Initialize parameters for Regional Master (highly + available master). The structure is documented below. + items: + properties: + location: + description: Array of locations, where master instances + will be allocated. The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there only one subnet in specified zone, + an address in this subnet will be allocated. 
+ type: string + zone: + description: ID of the availability zone. + type: string + type: object + type: array + region: + description: Name of availability region (e.g. "ru-central1"), + where master instances will be allocated. + type: string + type: object + type: array + securityGroupIds: + description: List of security group IDs to which the Kubernetes + cluster belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + version: + description: (Computed) Version of Kubernetes that will + be used for master. + type: string + versionInfo: + description: (Computed) Information about cluster version. + The structure is documented below. + items: + properties: + currentVersion: + description: Current Kubernetes version, major.minor + (e.g. 1.15). + type: string + newRevisionAvailable: + description: Boolean flag. Newer revisions may include + Kubernetes patches (e.g 1.15.1 -> 1.15.2) as well + as some internal component updates - new features + or bug fixes in yandex-specific components either + on the master or nodes. + type: boolean + newRevisionSummary: + description: Human readable description of the changes + to be applied when updating to the latest revision. + Empty if new_revision_available is false. + type: string + versionDeprecated: + description: Boolean flag. The current version is + on the deprecation schedule, component (master or + node group) should be upgraded. + type: boolean + type: object + type: array + zonal: + description: Initialize parameters for Zonal Master (single + node master). The structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet. If no ID is specified, + and there only one subnet in specified zone, an + address in this subnet will be allocated. + type: string + zone: + description: ID of the availability zone. + type: string + type: object + type: array + type: object + type: array + name: + description: Name of a specific Kubernetes cluster. + type: string + networkId: + description: The ID of the cluster network. + type: string + networkImplementation: + description: Network Implementation options. The structure is + documented below. + items: + properties: + cilium: + description: Cilium network implementation configuration. + No options exist. + items: + type: object + type: array + type: object + type: array + networkPolicyProvider: + description: 'Network policy provider for the cluster. Possible + values: CALICO.' + type: string + nodeIpv4CidrMaskSize: + description: Size of the masks that are assigned to each node + in the cluster. Effectively limits maximum number of pods for + each node. + type: number + nodeServiceAccountId: + description: Service account to be used by the worker nodes of + the Kubernetes cluster to access Container Registry or to push + node logs and metrics. + type: string + releaseChannel: + description: Cluster release channel. + type: string + serviceAccountId: + description: Service account to be used for provisioning Compute + Cloud and VPC resources for Kubernetes cluster. Selected service + account should have edit role on the folder where the Kubernetes + cluster will be located and on the folder where selected network + resides. + type: string + serviceIpv4Range: + description: CIDR block. IP range Kubernetes service Kubernetes + cluster IP addresses will be allocated from. It should not overlap + with any subnet in the network the Kubernetes cluster located + in. 
+ type: string + serviceIpv6Range: + description: Identical to service_ipv4_range but for IPv6 protocol. + type: string + status: + description: (Computed)Status of the Kubernetes cluster. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kubernetes.yandex-cloud.upjet.crossplane.io_nodegroups.yaml b/package/crds/kubernetes.yandex-cloud.upjet.crossplane.io_nodegroups.yaml new file mode 100644 index 0000000..8ca1908 --- /dev/null +++ b/package/crds/kubernetes.yandex-cloud.upjet.crossplane.io_nodegroups.yaml @@ -0,0 +1,2072 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: nodegroups.kubernetes.yandex-cloud.upjet.crossplane.io +spec: + group: kubernetes.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: NodeGroup + listKind: NodeGroupList + plural: nodegroups + singular: nodegroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeGroup is the Schema for the NodeGroups API. Allows management + of Yandex Kubernetes Node Group. For more information, see + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeGroupSpec defines the desired state of NodeGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocationPolicy: + description: This argument specify subnets (zones), that will + be used by node group compute instances. The structure is documented + below. + items: + properties: + location: + description: Repeated field, that specify subnets (zones), + that will be used by node group compute instances. The + structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet, that will be used by + one compute instance in node group. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone where for + one compute instance in node group. + type: string + type: object + type: array + type: object + type: array + allowedUnsafeSysctls: + description: A list of allowed unsafe sysctl parameters for this + node group. For more details see documentation. + items: + type: string + type: array + clusterId: + description: The ID of the Kubernetes cluster that this node group + belongs to. + type: string + clusterIdRef: + description: Reference to a Cluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a Cluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deployPolicy: + description: Deploy policy of the node group. The structure is + documented below. + items: + properties: + maxExpansion: + description: The maximum number of instances that can be + temporarily allocated above the group's target size during + the update. + type: number + maxUnavailable: + description: The maximum number of running instances that + can be taken offline during update. + type: number + type: object + type: array + description: + description: A description of the Kubernetes node group. 
+ type: string + instanceTemplate: + description: Template used to create compute instances in this + Kubernetes node group. The structure is documented below. + items: + properties: + bootDisk: + description: The specifications for boot disks that will + be attached to the instance. The structure is documented + below. + items: + properties: + size: + description: The number of instances in the node group. + type: number + type: + description: 'Type of container runtime. Values: docker, + containerd.' + type: string + type: object + type: array + containerNetwork: + description: Container network configuration. The structure + is documented below. + items: + properties: + podMtu: + description: MTU for pods. + type: number + type: object + type: array + containerRuntime: + description: Container runtime configuration. The structure + is documented below. + items: + properties: + type: + description: 'Type of container runtime. Values: docker, + containerd.' + type: string + type: object + type: array + gpuSettings: + description: GPU settings. The structure is documented below. + items: + properties: + gpuClusterId: + description: GPU cluster id. + type: string + gpuEnvironment: + description: 'GPU environment. Values: runc, runc_drivers_cuda.' + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels that will be assigned to compute nodes + (instances), created by the Node Group. + type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: 'The set of metadata key:value pairs assigned + to this instance template. This includes custom metadata + and predefined keys. Note: key "user-data" won''t be provided + into instances. It reserved for internal activity in kubernetes_node_group + resource.' + type: object + x-kubernetes-map-type: granular + name: + description: |- + Name template of the instance. In order to be unique it must contain at least one of instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, default is used: {instance_group.id}-{instance.short_id} + It may also contain another placeholders, see Compute Instance group metadata doc for full list. + type: string + nat: + description: Boolean flag, enables NAT for node group compute + instances. + type: boolean + networkAccelerationType: + description: 'Type of network acceleration. Values: standard, + software_accelerated.' + type: string + networkInterface: + description: An array with the network interfaces that will + be attached to the instance. The structure is documented + below. + items: + properties: + ipv4: + description: Allocate an IPv4 address for the interface. + The default value is true. + type: boolean + ipv4DnsRecords: + description: List of configurations for creating ipv4 + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private + zone is used). + type: string + fqdn: + description: DNS record FQDN. + type: string + ptr: + description: When set to true, also create a + PTR DNS record. + type: boolean + ttl: + description: DNS record TTL (in seconds). + type: number + type: object + type: array + ipv6: + description: If true, allocate an IPv6 address for + the interface. The address will be automatically + assigned from the specified subnet. 
+ type: boolean + ipv6DnsRecords: + description: List of configurations for creating ipv6 + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private + zone is used). + type: string + fqdn: + description: DNS record FQDN. + type: string + ptr: + description: When set to true, also create a + PTR DNS record. + type: boolean + ttl: + description: DNS record TTL (in seconds). + type: number + type: object + type: array + nat: + description: A public address that can be used to + access the internet over NAT. + type: boolean + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to + populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The IDs of the subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in vpc to populate + subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc + to populate subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + placementGroupId: + description: Specifies the id of the Placement Group + to assign to the instances. + type: string + type: object + type: array + platformId: + description: The ID of the hardware platform configuration + for the node group compute instances. + type: string + resources: + items: + properties: + coreFraction: + description: Baseline core performance as a percent. + type: number + cores: + description: Number of CPU cores allocated to the + instance. + type: number + gpus: + description: Number of GPU cores allocated to the + instance. + type: number + memory: + description: The memory size allocated to the instance. + type: number + type: object + type: array + schedulingPolicy: + description: The scheduling policy for the instances in + node group. The structure is documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. + Defaults to false. + type: boolean + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs assigned to the Kubernetes + node group. + type: object + x-kubernetes-map-type: granular + maintenancePolicy: + description: (Computed) Maintenance policy for this Kubernetes + node group. If policy is omitted, automatic revision upgrades + are enabled and could happen at any time. 
Revision upgrades + are performed only within the same minor version, e.g. 1.13. + Minor version upgrades (e.g. 1.13->1.14) should be performed + manually. The structure is documented below. + items: + properties: + autoRepair: + description: Boolean flag that specifies if node group can + be repaired automatically. When omitted, default value + is TRUE. + type: boolean + autoUpgrade: + description: Boolean flag that specifies if node group can + be upgraded automatically. When omitted, default value + is TRUE. + type: boolean + maintenanceWindow: + description: (Computed) Set of day intervals, when maintenance + is allowed for this node group. When omitted, it defaults + to any time. + items: + properties: + day: + type: string + duration: + type: string + startTime: + type: string + type: object + type: array + type: object + type: array + name: + description: Name of a specific Kubernetes node group. + type: string + nodeLabels: + additionalProperties: + type: string + description: A set of key/value label pairs, that are assigned + to all the nodes of this Kubernetes node group. + type: object + x-kubernetes-map-type: granular + nodeTaints: + description: A list of Kubernetes taints, that are applied to + all the nodes of this Kubernetes node group. + items: + type: string + type: array + scalePolicy: + description: Scale policy of the node group. The structure is + documented below. + items: + properties: + autoScale: + description: Scale policy for an autoscaled node group. + The structure is documented below. + items: + properties: + initial: + description: Initial number of instances in the node + group. + type: number + max: + description: Maximum number of instances in the node + group. + type: number + min: + description: Minimum number of instances in the node + group. + type: number + type: object + type: array + fixedScale: + description: Scale policy for a fixed scale node group. + The structure is documented below. + items: + properties: + size: + description: The number of instances in the node group. + type: number + type: object + type: array + type: object + type: array + version: + description: Version of Kubernetes that will be used for Kubernetes + node group. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allocationPolicy: + description: This argument specify subnets (zones), that will + be used by node group compute instances. The structure is documented + below. + items: + properties: + location: + description: Repeated field, that specify subnets (zones), + that will be used by node group compute instances. The + structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet, that will be used by + one compute instance in node group. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: ID of the availability zone where for + one compute instance in node group. + type: string + type: object + type: array + type: object + type: array + allowedUnsafeSysctls: + description: A list of allowed unsafe sysctl parameters for this + node group. For more details see documentation. + items: + type: string + type: array + clusterId: + description: The ID of the Kubernetes cluster that this node group + belongs to. + type: string + clusterIdRef: + description: Reference to a Cluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a Cluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deployPolicy: + description: Deploy policy of the node group. The structure is + documented below. + items: + properties: + maxExpansion: + description: The maximum number of instances that can be + temporarily allocated above the group's target size during + the update. + type: number + maxUnavailable: + description: The maximum number of running instances that + can be taken offline during update. + type: number + type: object + type: array + description: + description: A description of the Kubernetes node group. + type: string + instanceTemplate: + description: Template used to create compute instances in this + Kubernetes node group. The structure is documented below. + items: + properties: + bootDisk: + description: The specifications for boot disks that will + be attached to the instance. The structure is documented + below. + items: + properties: + size: + description: The number of instances in the node group. + type: number + type: + description: 'Type of container runtime. Values: docker, + containerd.' + type: string + type: object + type: array + containerNetwork: + description: Container network configuration. The structure + is documented below. + items: + properties: + podMtu: + description: MTU for pods. + type: number + type: object + type: array + containerRuntime: + description: Container runtime configuration. The structure + is documented below. + items: + properties: + type: + description: 'Type of container runtime. Values: docker, + containerd.' + type: string + type: object + type: array + gpuSettings: + description: GPU settings. The structure is documented below. + items: + properties: + gpuClusterId: + description: GPU cluster id. + type: string + gpuEnvironment: + description: 'GPU environment. Values: runc, runc_drivers_cuda.' + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels that will be assigned to compute nodes + (instances), created by the Node Group. + type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: 'The set of metadata key:value pairs assigned + to this instance template. This includes custom metadata + and predefined keys. 
Note: key "user-data" won''t be provided + into instances. It is reserved for internal activity in the kubernetes_node_group + resource.' + type: object + x-kubernetes-map-type: granular + name: + description: |- + Name template of the instance. In order to be unique it must contain at least one of the instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, the default is used: {instance_group.id}-{instance.short_id} + It may also contain other placeholders; see the Compute Instance group metadata doc for the full list. + type: string + nat: + description: Boolean flag that enables NAT for the node group + compute instances. + type: boolean + networkAccelerationType: + description: 'Type of network acceleration. Values: standard, + software_accelerated.' + type: string + networkInterface: + description: An array with the network interfaces that will + be attached to the instance. The structure is documented + below. + items: + properties: + ipv4: + description: Allocate an IPv4 address for the interface. + The default value is true. + type: boolean + ipv4DnsRecords: + description: List of configurations for creating ipv4 + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private + zone is used). + type: string + fqdn: + description: DNS record FQDN. + type: string + ptr: + description: When set to true, also create a + PTR DNS record. + type: boolean + ttl: + description: DNS record TTL (in seconds). + type: number + type: object + type: array + ipv6: + description: If true, allocate an IPv6 address for + the interface. The address will be automatically + assigned from the specified subnet. + type: boolean + ipv6DnsRecords: + description: List of configurations for creating ipv6 + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private + zone is used). + type: string + fqdn: + description: DNS record FQDN. + type: string + ptr: + description: When set to true, also create a + PTR DNS record. + type: boolean + ttl: + description: DNS record TTL (in seconds). + type: number + type: object + type: array + nat: + description: Boolean flag that enables a public address, + which can be used to access the internet over NAT. + type: boolean + securityGroupIds: + description: Security group IDs for the network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to + populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile.
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup + in vpc to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The IDs of the subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in vpc to populate + subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc + to populate subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + placementGroupId: + description: Specifies the ID of the Placement Group + to assign to the instances. + type: string + type: object + type: array + platformId: + description: The ID of the hardware platform configuration + for the node group compute instances. + type: string + resources: + items: + properties: + coreFraction: + description: Baseline core performance as a percent. + type: number + cores: + description: Number of CPU cores allocated to the + instance. + type: number + gpus: + description: Number of GPU cores allocated to the + instance. + type: number + memory: + description: The memory size allocated to the instance. + type: number + type: object + type: array + schedulingPolicy: + description: The scheduling policy for the instances in + the node group. The structure is documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. + Defaults to false. + type: boolean + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs assigned to the Kubernetes + node group. + type: object + x-kubernetes-map-type: granular + maintenancePolicy: + description: (Computed) Maintenance policy for this Kubernetes + node group. If the policy is omitted, automatic revision upgrades + are enabled and could happen at any time. Revision upgrades + are performed only within the same minor version, e.g. 1.13. + Minor version upgrades (e.g. 1.13->1.14) should be performed + manually. The structure is documented below. + items: + properties: + autoRepair: + description: Boolean flag that specifies if the node group + can be repaired automatically. When omitted, the default + value is TRUE. + type: boolean + autoUpgrade: + description: Boolean flag that specifies if the node group + can be upgraded automatically. When omitted, the default + value is TRUE. + type: boolean + maintenanceWindow: + description: (Computed) Set of day intervals when maintenance + is allowed for this node group. When omitted, it defaults + to any time. + items: + properties: + day: + type: string + duration: + type: string + startTime: + type: string + type: object + type: array + type: object + type: array + name: + description: Name of a specific Kubernetes node group. + type: string + nodeLabels: + additionalProperties: + type: string + description: A set of key/value label pairs that are assigned + to all the nodes of this Kubernetes node group. + type: object + x-kubernetes-map-type: granular + nodeTaints: + description: A list of Kubernetes taints that are applied to + all the nodes of this Kubernetes node group. + items: + type: string + type: array + scalePolicy: + description: Scale policy of the node group. The structure is + documented below. + items: + properties: + autoScale: + description: Scale policy for an autoscaled node group. + The structure is documented below. + items: + properties: + initial: + description: Initial number of instances in the node + group. + type: number + max: + description: Maximum number of instances in the node + group.
+ type: number + min: + description: Minimum number of instances in the node + group. + type: number + type: object + type: array + fixedScale: + description: Scale policy for a fixed scale node group. + The structure is documented below. + items: + properties: + size: + description: The number of instances in the node group. + type: number + type: object + type: array + type: object + type: array + version: + description: Version of Kubernetes that will be used for Kubernetes + node group. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for other store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.instanceTemplate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceTemplate) + || (has(self.initProvider) && has(self.initProvider.instanceTemplate))' + - message: spec.forProvider.scalePolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scalePolicy) + || (has(self.initProvider) && has(self.initProvider.scalePolicy))' + status: + description: NodeGroupStatus defines the observed state of NodeGroup. + properties: + atProvider: + properties: + allocationPolicy: + description: This argument specifies the subnets (zones) that + will be used by the node group compute instances. The structure + is documented below.
+ items: + properties: + location: + description: Repeated field that specifies the subnets (zones) + that will be used by the node group compute instances. The + structure is documented below. + items: + properties: + subnetId: + description: ID of the subnet that will be used by + one compute instance in the node group. + type: string + zone: + description: ID of the availability zone for one + compute instance in the node group. + type: string + type: object + type: array + type: object + type: array + allowedUnsafeSysctls: + description: A list of allowed unsafe sysctl parameters for this + node group. For more details see documentation. + items: + type: string + type: array + clusterId: + description: The ID of the Kubernetes cluster that this node group + belongs to. + type: string + createdAt: + description: (Computed) The Kubernetes node group creation timestamp. + type: string + deployPolicy: + description: Deploy policy of the node group. The structure is + documented below. + items: + properties: + maxExpansion: + description: The maximum number of instances that can be + temporarily allocated above the group's target size during + the update. + type: number + maxUnavailable: + description: The maximum number of running instances that + can be taken offline during update. + type: number + type: object + type: array + description: + description: A description of the Kubernetes node group. + type: string + id: + type: string + instanceGroupId: + description: (Computed) ID of instance group that is used to manage + this Kubernetes node group. + type: string + instanceTemplate: + description: Template used to create compute instances in this + Kubernetes node group. The structure is documented below. + items: + properties: + bootDisk: + description: The specifications for boot disks that will + be attached to the instance. The structure is documented + below. + items: + properties: + size: + description: The size of the boot disk in GB. + type: number + type: + description: 'The boot disk type. Values: network-hdd, + network-ssd.' + type: string + type: object + type: array + containerNetwork: + description: Container network configuration. The structure + is documented below. + items: + properties: + podMtu: + description: MTU for pods. + type: number + type: object + type: array + containerRuntime: + description: Container runtime configuration. The structure + is documented below. + items: + properties: + type: + description: 'Type of container runtime. Values: docker, + containerd.' + type: string + type: object + type: array + gpuSettings: + description: GPU settings. The structure is documented below. + items: + properties: + gpuClusterId: + description: GPU cluster ID. + type: string + gpuEnvironment: + description: 'GPU environment. Values: runc, runc_drivers_cuda.' + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels that will be assigned to compute nodes + (instances) created by the Node Group. + type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: 'The set of metadata key:value pairs assigned + to this instance template. This includes custom metadata + and predefined keys. Note: key "user-data" won''t be provided + into instances. It is reserved for internal activity in the + kubernetes_node_group resource.' + type: object + x-kubernetes-map-type: granular + name: + description: |- + Name template of the instance.
In order to be unique it must contain at least one of the instance unique placeholders: + {instance.short_id} + {instance.index} + combination of {instance.zone_id} and {instance.index_in_zone} + Example: my-instance-{instance.index} + If not set, the default is used: {instance_group.id}-{instance.short_id} + It may also contain other placeholders; see the Compute Instance group metadata doc for the full list. + type: string + nat: + description: Boolean flag that enables NAT for the node group + compute instances. + type: boolean + networkAccelerationType: + description: 'Type of network acceleration. Values: standard, + software_accelerated.' + type: string + networkInterface: + description: An array with the network interfaces that will + be attached to the instance. The structure is documented + below. + items: + properties: + ipv4: + description: Allocate an IPv4 address for the interface. + The default value is true. + type: boolean + ipv4DnsRecords: + description: List of configurations for creating ipv4 + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private + zone is used). + type: string + fqdn: + description: DNS record FQDN. + type: string + ptr: + description: When set to true, also create a + PTR DNS record. + type: boolean + ttl: + description: DNS record TTL (in seconds). + type: number + type: object + type: array + ipv6: + description: If true, allocate an IPv6 address for + the interface. The address will be automatically + assigned from the specified subnet. + type: boolean + ipv6DnsRecords: + description: List of configurations for creating ipv6 + DNS records. The structure is documented below. + items: + properties: + dnsZoneId: + description: DNS zone ID (if not set, private + zone is used). + type: string + fqdn: + description: DNS record FQDN. + type: string + ptr: + description: When set to true, also create a + PTR DNS record. + type: boolean + ttl: + description: DNS record TTL (in seconds). + type: number + type: object + type: array + nat: + description: Boolean flag that enables a public address, + which can be used to access the internet over NAT. + type: boolean + securityGroupIds: + description: Security group IDs for the network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The IDs of the subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + placementPolicy: + description: The placement policy configuration. The structure + is documented below. + items: + properties: + placementGroupId: + description: Specifies the ID of the Placement Group + to assign to the instances. + type: string + type: object + type: array + platformId: + description: The ID of the hardware platform configuration + for the node group compute instances. + type: string + resources: + items: + properties: + coreFraction: + description: Baseline core performance as a percent. + type: number + cores: + description: Number of CPU cores allocated to the + instance. + type: number + gpus: + description: Number of GPU cores allocated to the + instance. + type: number + memory: + description: The memory size allocated to the instance. + type: number + type: object + type: array + schedulingPolicy: + description: The scheduling policy for the instances in + the node group. The structure is documented below. + items: + properties: + preemptible: + description: Specifies if the instance is preemptible. + Defaults to false.
+ type: boolean + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs assigned to the Kubernetes + node group. + type: object + x-kubernetes-map-type: granular + maintenancePolicy: + description: (Computed) Maintenance policy for this Kubernetes + node group. If the policy is omitted, automatic revision upgrades + are enabled and could happen at any time. Revision upgrades + are performed only within the same minor version, e.g. 1.13. + Minor version upgrades (e.g. 1.13->1.14) should be performed + manually. The structure is documented below. + items: + properties: + autoRepair: + description: Boolean flag that specifies if the node group + can be repaired automatically. When omitted, the default + value is TRUE. + type: boolean + autoUpgrade: + description: Boolean flag that specifies if the node group + can be upgraded automatically. When omitted, the default + value is TRUE. + type: boolean + maintenanceWindow: + description: (Computed) Set of day intervals when maintenance + is allowed for this node group. When omitted, it defaults + to any time. + items: + properties: + day: + type: string + duration: + type: string + startTime: + type: string + type: object + type: array + type: object + type: array + name: + description: Name of a specific Kubernetes node group. + type: string + nodeLabels: + additionalProperties: + type: string + description: A set of key/value label pairs that are assigned + to all the nodes of this Kubernetes node group. + type: object + x-kubernetes-map-type: granular + nodeTaints: + description: A list of Kubernetes taints that are applied to + all the nodes of this Kubernetes node group. + items: + type: string + type: array + scalePolicy: + description: Scale policy of the node group. The structure is + documented below. + items: + properties: + autoScale: + description: Scale policy for an autoscaled node group. + The structure is documented below. + items: + properties: + initial: + description: Initial number of instances in the node + group. + type: number + max: + description: Maximum number of instances in the node + group. + type: number + min: + description: Minimum number of instances in the node + group. + type: number + type: object + type: array + fixedScale: + description: Scale policy for a fixed scale node group. + The structure is documented below. + items: + properties: + size: + description: The number of instances in the node group. + type: number + type: object + type: array + type: object + type: array + status: + description: (Computed) Status of the Kubernetes node group. + type: string + version: + description: Version of Kubernetes that will be used for the Kubernetes + node group. + type: string + versionInfo: + description: (Computed) Information about Kubernetes node group + version. The structure is documented below. + items: + properties: + currentVersion: + description: Current Kubernetes version, major.minor (e.g. + 1.15). + type: string + newRevisionAvailable: + description: True/false flag. Newer revisions may include + Kubernetes patches (e.g. 1.15.1 -> 1.15.2) as well as some + internal component updates - new features or bug fixes + in yandex-specific components either on the master or + nodes. + type: boolean + newRevisionSummary: + description: Human-readable description of the changes to + be applied when updating to the latest revision. Empty + if new_revision_available is false.
+ type: string + versionDeprecated: + description: True/false flag. The current version is on + the deprecation schedule; the component (master or node group) + should be upgraded. + type: boolean + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.yandex-cloud.upjet.crossplane.io_networkloadbalancers.yaml b/package/crds/lb.yandex-cloud.upjet.crossplane.io_networkloadbalancers.yaml new file mode 100644 index 0000000..3241459 --- /dev/null +++ b/package/crds/lb.yandex-cloud.upjet.crossplane.io_networkloadbalancers.yaml @@ -0,0 +1,1248 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: networkloadbalancers.lb.yandex-cloud.upjet.crossplane.io +spec: + group: lb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: NetworkLoadBalancer + listKind: NetworkLoadBalancerList + plural: networkloadbalancers + singular: networkloadbalancer + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: NetworkLoadBalancer is the Schema for the NetworkLoadBalancers + API. A network load balancer is used to evenly distribute the load across + cloud resources. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NetworkLoadBalancerSpec defines the desired state of NetworkLoadBalancer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + attachedTargetGroup: + description: An AttachedTargetGroup resource. The structure is + documented below. + items: + properties: + healthcheck: + description: A HealthCheck resource. The structure is documented + below. + items: + properties: + healthyThreshold: + description: Number of successful health checks required + in order to set the HEALTHY status for the target. + type: number + httpOptions: + description: Options for HTTP health check. The structure + is documented below. + items: + properties: + path: + description: URL path to set for health checking + requests for every target in the target group. + For example /ping. The default path is /. + type: string + port: + description: Port to use for HTTP health checks. + type: number + type: object + type: array + interval: + description: The interval between health checks. The + default is 2 seconds. + type: number + name: + description: Name of the health check. Provided + by the client when the health check is + created. + type: string + tcpOptions: + description: Options for TCP health check. The structure + is documented below. + items: + properties: + port: + description: Port to use for TCP health checks. + type: number + type: object + type: array + timeout: + description: Timeout for a target to return a response + for the health check. The default is 1 second. + type: number + unhealthyThreshold: + description: Number of failed health checks before + changing the status to UNHEALTHY. The default is + 2. + type: number + type: object + type: array + targetGroupId: + description: ID of the target group. + type: string + targetGroupIdRef: + description: Reference to a TargetGroup to populate targetGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupIdSelector: + description: Selector for a TargetGroup to populate targetGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + deletionProtection: + description: Flag that protects the network load balancer from + accidental deletion. + type: boolean + description: + description: An optional description of the network load balancer. + Provide this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this network load balancer. A + list of key/value pairs. + type: object + x-kubernetes-map-type: granular + listener: + description: Listener specification that will be used by a network + load balancer. The structure is documented below. + items: + properties: + externalAddressSpec: + description: External IP address specification. The structure + is documented below. + items: + properties: + address: + description: External IP address for a listener. The + IP address will be allocated if it has not been set. + type: string + ipVersion: + description: IP version of the external addresses + that the load balancer works with. Must be one of + ipv4 or ipv6. The default is ipv4. + type: string + type: object + type: array + internalAddressSpec: + description: Internal IP address specification. The structure + is documented below. + items: + properties: + address: + description: Internal IP address for a listener. Must + belong to the subnet that is referenced in subnet_id. + The IP address will be allocated if it has not been set. + type: string + ipVersion: + description: IP version of the internal addresses + that the load balancer works with. Must be one of + ipv4 or ipv6. The default is ipv4. + type: string + subnetId: + description: ID of the subnet to which the internal + IP address belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected.
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + name: + description: Name of the listener. The name must be unique + for each listener on a single load balancer. + type: string + port: + description: Port for incoming traffic. + type: number + protocol: + description: 'Protocol for incoming traffic: TCP or UDP. + The default is TCP.' + type: string + targetPort: + description: Port of a target. The default is the same as + the listener's port. + type: number + type: object + type: array + name: + description: Name of the network load balancer. Provided by the + client when the network load balancer is created. + type: string + regionId: + description: ID of the availability zone where the network load + balancer resides. If omitted, the default region is used. + type: string + type: + description: Type of the network load balancer. Must be one of + 'external' or 'internal'. The default is 'external'. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + attachedTargetGroup: + description: An AttachedTargetGroup resource. The structure is + documented below. + items: + properties: + healthcheck: + description: A HealthCheck resource. The structure is documented + below. + items: + properties: + healthyThreshold: + description: Number of successful health checks required + in order to set the HEALTHY status for the target. + type: number + httpOptions: + description: Options for HTTP health check. The structure + is documented below. + items: + properties: + path: + description: URL path to set for health checking + requests for every target in the target group. + For example /ping. The default path is /. + type: string + port: + description: Port to use for HTTP health checks. + type: number + type: object + type: array + interval: + description: The interval between health checks. The + default is 2 seconds. + type: number + name: + description: Name of the health check.
Provided + by the client when the health check is + created. + type: string + tcpOptions: + description: Options for TCP health check. The structure + is documented below. + items: + properties: + port: + description: Port to use for TCP health checks. + type: number + type: object + type: array + timeout: + description: Timeout for a target to return a response + for the health check. The default is 1 second. + type: number + unhealthyThreshold: + description: Number of failed health checks before + changing the status to UNHEALTHY. The default is + 2. + type: number + type: object + type: array + targetGroupId: + description: ID of the target group. + type: string + targetGroupIdRef: + description: Reference to a TargetGroup to populate targetGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupIdSelector: + description: Selector for a TargetGroup to populate targetGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + deletionProtection: + description: Flag that protects the network load balancer from + accidental deletion. + type: boolean + description: + description: An optional description of the network load balancer. + Provide this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this network load balancer. A + list of key/value pairs. + type: object + x-kubernetes-map-type: granular + listener: + description: Listener specification that will be used by a network + load balancer. The structure is documented below. + items: + properties: + externalAddressSpec: + description: External IP address specification. The structure + is documented below. + items: + properties: + address: + description: External IP address for a listener. The + IP address will be allocated if it has not been set. + type: string + ipVersion: + description: IP version of the external addresses + that the load balancer works with. Must be one of + ipv4 or ipv6. The default is ipv4. + type: string + type: object + type: array + internalAddressSpec: + description: Internal IP address specification. The structure + is documented below. + items: + properties: + address: + description: Internal IP address for a listener. Must + belong to the subnet that is referenced in subnet_id. + The IP address will be allocated if it has not been set. + type: string + ipVersion: + description: IP version of the internal addresses + that the load balancer works with. Must be one of + ipv4 or ipv6. The default is ipv4. + type: string + subnetId: + description: ID of the subnet to which the internal + IP address belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId.
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + name: + description: Name of the listener. The name must be unique + for each listener on a single load balancer. + type: string + port: + description: Port for incoming traffic. + type: number + protocol: + description: 'Protocol for incoming traffic: TCP or UDP. + The default is TCP.' + type: string + targetPort: + description: Port of a target. The default is the same as + the listener's port. + type: number + type: object + type: array + name: + description: Name of the network load balancer. Provided by the + client when the network load balancer is created. + type: string + regionId: + description: ID of the availability zone where the network load + balancer resides. If omitted, the default region is used. + type: string + type: + description: Type of the network load balancer. Must be one of + 'external' or 'internal'. The default is 'external'. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for other store types.
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: NetworkLoadBalancerStatus defines the observed state of NetworkLoadBalancer. + properties: + atProvider: + properties: + attachedTargetGroup: + description: An AttachedTargetGroup resource. The structure is + documented below. + items: + properties: + healthcheck: + description: A HealthCheck resource. The structure is documented + below. + items: + properties: + healthyThreshold: + description: Number of successful health checks required + in order to set the HEALTHY status for the target. + type: number + httpOptions: + description: Options for HTTP health check. The structure + is documented below. + items: + properties: + path: + description: URL path to set for health checking + requests for every target in the target group. + For example /ping. The default path is /. + type: string + port: + description: Port to use for TCP health checks. + type: number + type: object + type: array + interval: + description: The interval between health checks. The + default is 2 seconds. + type: number + name: + description: Name of the network load balancer. Provided + by the client when the network load balancer is + created. + type: string + tcpOptions: + description: Options for TCP health check. The structure + is documented below. + items: + properties: + port: + description: Port to use for TCP health checks. + type: number + type: object + type: array + timeout: + description: Timeout for a target to return a response + for the health check. The default is 1 second. + type: number + unhealthyThreshold: + description: Number of failed health checks before + changing the status to UNHEALTHY. The default is + 2. + type: number + type: object + type: array + targetGroupId: + description: ID of the target group. + type: string + type: object + type: array + createdAt: + description: The network load balancer creation timestamp. + type: string + deletionProtection: + description: Flag that protects the network load balancer from + accidental deletion. + type: boolean + description: + description: An optional description of the network load balancer. + Provide this property when you create the resource. 
+                    type: string
+                  folderId:
+                    description: The ID of the folder to which the resource belongs.
+                      If omitted, the provider folder is used.
+                    type: string
+                  id:
+                    description: The ID of the network load balancer.
+                    type: string
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: Labels to assign to this network load balancer.
+                      A list of key/value pairs.
+                    type: object
+                    x-kubernetes-map-type: granular
+                  listener:
+                    description: Listener specification that will be used by a
+                      network load balancer. The structure is documented below.
+                    items:
+                      properties:
+                        externalAddressSpec:
+                          description: External IP address specification. The structure
+                            is documented below.
+                          items:
+                            properties:
+                              address:
+                                description: External IP address for a listener.
+                                  An IP address will be allocated if one isn't set.
+                                type: string
+                              ipVersion:
+                                description: IP version of the external addresses
+                                  that the load balancer works with. Must be one
+                                  of ipv4 or ipv6. The default is ipv4.
+                                type: string
+                            type: object
+                          type: array
+                        internalAddressSpec:
+                          description: Internal IP address specification. The structure
+                            is documented below.
+                          items:
+                            properties:
+                              address:
+                                description: Internal IP address for a listener.
+                                  Must belong to the subnet that is referenced in
+                                  subnet_id. An IP address will be allocated if
+                                  one isn't set.
+                                type: string
+                              ipVersion:
+                                description: IP version of the internal addresses
+                                  that the load balancer works with. Must be one
+                                  of ipv4 or ipv6. The default is ipv4.
+                                type: string
+                              subnetId:
+                                description: ID of the subnet to which the internal
+                                  IP address belongs.
+                                type: string
+                            type: object
+                          type: array
+                        name:
+                          description: Name of the listener. The name must be unique
+                            for each listener on a single load balancer.
+                          type: string
+                        port:
+                          description: Port for incoming traffic.
+                          type: number
+                        protocol:
+                          description: Protocol for incoming traffic. Either TCP
+                            or UDP; the default is TCP.
+                          type: string
+                        targetPort:
+                          description: Port of a target. The default is the same
+                            as the listener's port.
+                          type: number
+                      type: object
+                    type: array
+                  name:
+                    description: Name of the network load balancer. Provided by
+                      the client when the network load balancer is created.
+                    type: string
+                  regionId:
+                    description: ID of the availability zone where the network
+                      load balancer resides. If omitted, the default region is used.
+                    type: string
+                  type:
+                    description: Type of the network load balancer. Must be one
+                      of 'external' or 'internal'. The default is 'external'.
+                    type: string
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      type: integer
+                    reason:
+                      description: A Reason for this condition's last transition
+                        from one status to another.
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.yandex-cloud.upjet.crossplane.io_targetgroups.yaml b/package/crds/lb.yandex-cloud.upjet.crossplane.io_targetgroups.yaml new file mode 100644 index 0000000..3c1e348 --- /dev/null +++ b/package/crds/lb.yandex-cloud.upjet.crossplane.io_targetgroups.yaml @@ -0,0 +1,742 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: targetgroups.lb.yandex-cloud.upjet.crossplane.io +spec: + group: lb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: TargetGroup + listKind: TargetGroupList + plural: targetgroups + singular: targetgroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TargetGroup is the Schema for the TargetGroups API. A load balancer + distributes the load across cloud resources that are combined into a target + group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TargetGroupSpec defines the desired state of TargetGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of the target group. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this target group. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the target group. Provided by the client + when the target group is created. + type: string + regionId: + description: ID of the availability zone where the target group + resides. If omitted, default region is being used. + type: string + target: + description: A Target resource. The structure is documented below. + items: + properties: + address: + description: IP address of the target. + type: string + subnetId: + description: ID of the subnet that targets are connected + to. All targets in the target group must be connected + to the same subnet within a single availability zone. 
+ type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of the target group. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this target group. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the target group. Provided by the client + when the target group is created. + type: string + regionId: + description: ID of the availability zone where the target group + resides. If omitted, default region is being used. + type: string + target: + description: A Target resource. The structure is documented below. + items: + properties: + address: + description: IP address of the target. + type: string + subnetId: + description: ID of the subnet that targets are connected + to. All targets in the target group must be connected + to the same subnet within a single availability zone. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TargetGroupStatus defines the observed state of TargetGroup. + properties: + atProvider: + properties: + createdAt: + description: The target group creation timestamp. 
+ type: string + description: + description: An optional description of the target group. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + id: + description: The ID of the target group. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this target group. A list of + key/value pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the target group. Provided by the client + when the target group is created. + type: string + regionId: + description: ID of the availability zone where the target group + resides. If omitted, default region is being used. + type: string + target: + description: A Target resource. The structure is documented below. + items: + properties: + address: + description: IP address of the target. + type: string + subnetId: + description: ID of the subnet that targets are connected + to. All targets in the target group must be connected + to the same subnet within a single availability zone. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/loadtesting.yandex-cloud.upjet.crossplane.io_agents.yaml b/package/crds/loadtesting.yandex-cloud.upjet.crossplane.io_agents.yaml
new file mode 100644
index 0000000..5a32839
--- /dev/null
+++ b/package/crds/loadtesting.yandex-cloud.upjet.crossplane.io_agents.yaml
@@ -0,0 +1,1268 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: agents.loadtesting.yandex-cloud.upjet.crossplane.io
+spec:
+  group: loadtesting.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: Agent
+    listKind: AgentList
+    plural: agents
+    singular: agent
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: Agent is the Schema for the Agents API. Manages a Yandex Cloud
+          Load Testing Agent resource.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: AgentSpec defines the desired state of Agent
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  computeInstance:
+                    description: The template for creating a new compute instance
+                      running the load testing agent. The structure is documented
+                      below.
+                    items:
+                      properties:
+                        bootDisk:
+                          description: Boot disk specifications for the instance.
+                            The structure is documented below.
+                          items:
+                            properties:
+                              autoDelete:
+                                description: Whether the disk is auto-deleted when
+                                  the instance is deleted. The default value is
+                                  true.
+                                type: boolean
+                              deviceName:
+                                description: This value can be used to reference
+                                  the device under /dev/disk/by-id/.
+                                type: string
+                              initializeParams:
+                                description: Parameters for creating a disk alongside
+                                  the instance. The structure is documented below.
+                                items:
+                                  properties:
+                                    blockSize:
+                                      description: Block size of the disk, specified
+                                        in bytes.
+                                      type: number
+                                    description:
+                                      description: A description of the boot disk.
+                                      type: string
+                                    name:
+                                      description: The name of the boot disk. Must
+                                        be unique within the folder.
+                                      type: string
+                                    size:
+                                      description: The size of the disk in GB. Defaults
+                                        to 15 GB.
+                                      type: number
+                                    type:
+                                      description: The disk type.
+                                      type: string
+                                  type: object
+                                type: array
+                            type: object
+                          type: array
+                        labels:
+                          additionalProperties:
+                            type: string
+                          description: A set of key/value label pairs to assign
+                            to the instance.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        metadata:
+                          additionalProperties:
+                            type: string
+                          description: A set of metadata key/value pairs to make
+                            available from within the instance.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        networkInterface:
+                          description: Network specifications for the instance.
+                            This can be used multiple times for adding multiple
+                            interfaces. The structure is documented below.
+                          items:
+                            properties:
+                              ipAddress:
+                                description: Manually set static IP address.
+                                type: string
+                              ipv4:
+                                description: Flag for allocating IPv4 address for
+                                  the network interface.
+                                type: boolean
+                              ipv6:
+                                description: Flag for allocating IPv6 address for
+                                  the network interface.
+                                type: boolean
+                              ipv6Address:
+                                description: Manually set static IPv6 address.
+                                type: string
+                              nat:
+                                description: Flag for using NAT.
+                                type: boolean
+                              natIpAddress:
+                                description: A public address that can be used to
+                                  access the internet over NAT.
+                                type: string
+                              securityGroupIds:
+                                description: Security group IDs for the network
+                                  interface.
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: set
+                              subnetId:
+                                description: The ID of the subnet to attach this
+                                  interface to. The subnet must reside in the same
+                                  zone where this instance was created.
+                                type: string
+                              subnetIdRef:
+                                description: Reference to a Subnet in vpc to populate
+                                  subnetId.
+                                properties:
+                                  name:
+                                    description: Name of the referenced object.
+                                    type: string
+                                  policy:
+                                    description: Policies for referencing.
+                                    properties:
+                                      resolution:
+                                        default: Required
+                                        description: |-
+                                          Resolution specifies whether resolution of this reference is required.
+                                          The default is 'Required', which means the reconcile will fail if the
+                                          reference cannot be resolved. 'Optional' means this reference will be
+                                          a no-op if it cannot be resolved.
+                                        enum:
+                                        - Required
+                                        - Optional
+                                        type: string
+                                      resolve:
+                                        description: |-
+                                          Resolve specifies when this reference should be resolved. The default
+                                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                                          the corresponding field is not present. Use 'Always' to resolve the
+                                          reference on every reconcile.
+                                        enum:
+                                        - Always
+                                        - IfNotPresent
+                                        type: string
+                                    type: object
+                                required:
+                                - name
+                                type: object
+                              subnetIdSelector:
+                                description: Selector for a Subnet in vpc to populate
+                                  subnetId.
+                                properties:
+                                  matchControllerRef:
+                                    description: |-
+                                      MatchControllerRef ensures an object with the same controller reference
+                                      as the selecting object is selected.
+                                    type: boolean
+                                  matchLabels:
+                                    additionalProperties:
+                                      type: string
+                                    description: MatchLabels ensures an object with
+                                      matching labels is selected.
+                                    type: object
+                                  policy:
+                                    description: Policies for selection.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + platformId: + description: The Compute platform of virtual machine. If + it is not provided, the standard-v2 platform will be used. + type: string + resources: + description: Compute resource specifications for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline core + performance as a percent. + type: number + cores: + description: The number of CPU cores for the instance. + Defaults to 2 cores. + type: number + memory: + description: The memory size in GB. Defaults to 2 + GB. + type: number + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for + this load testing agent. Service account should have loadtesting.generatorClient + or loadtesting.externalAgent role in the folder. + type: string + serviceAccountIdRef: + description: Reference to a SecurityGroup in vpc to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a SecurityGroup in vpc to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zoneId: + description: The availability zone where the virtual machine + will be created. If it is not provided, the default provider + folder is used. + type: string + type: object + type: array + description: + description: A description of the load testing agent. + type: string + folderId: + description: The ID of the folder that the resources belong to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the agent. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the load testing agent. Must be unique + within folder. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + computeInstance: + description: The template for creating new compute instance running + load testing agent. The structure is documented below. + items: + properties: + bootDisk: + description: Boot disk specifications for the instance. + The structure is documented below. + items: + properties: + autoDelete: + description: Whether the disk is auto-deleted when + the instance is deleted. The default value is true. + type: boolean + deviceName: + description: This value can be used to reference the + device under /dev/disk/by-id/. + type: string + initializeParams: + description: Parameters for creating a disk alongside + the instance. The structure is documented below. + items: + properties: + blockSize: + description: Block size of the disk, specified + in bytes. + type: number + description: + description: A description of the boot disk. + type: string + name: + description: The name of the load testing agent. + Must be unique within folder. + type: string + size: + description: The size of the disk in GB. Defaults + to 15 GB. + type: number + type: + description: The disk type. + type: string + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to + the instance. + type: object + x-kubernetes-map-type: granular + metadata: + additionalProperties: + type: string + description: A set of metadata key/value pairs to make available + from within the instance. + type: object + x-kubernetes-map-type: granular + networkInterface: + description: Network specifications for the instance. This + can be used multiple times for adding multiple interfaces. + The structure is documented below. + items: + properties: + ipAddress: + description: Manual set static IP address. + type: string + ipv4: + description: Flag for allocating IPv4 address for + the network interface. + type: boolean + ipv6: + description: Flag for allocating IPv6 address for + the network interface. + type: boolean + ipv6Address: + description: Manual set static IPv6 address. + type: string + nat: + description: Flag for using NAT. + type: boolean + natIpAddress: + description: A public address that can be used to + access the internet over NAT. + type: string + securityGroupIds: + description: Security group ids for network interface. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the subnet to attach this interface + to. The subnet must reside in the same zone where + this instance was created. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + platformId: + description: The Compute platform of virtual machine. If + it is not provided, the standard-v2 platform will be used. + type: string + resources: + description: Compute resource specifications for the instance. + The structure is documented below. + items: + properties: + coreFraction: + description: If provided, specifies baseline core + performance as a percent. + type: number + cores: + description: The number of CPU cores for the instance. + Defaults to 2 cores. + type: number + memory: + description: The memory size in GB. Defaults to 2 + GB. + type: number + type: object + type: array + serviceAccountId: + description: The ID of the service account authorized for + this load testing agent. Service account should have loadtesting.generatorClient + or loadtesting.externalAgent role in the folder. + type: string + serviceAccountIdRef: + description: Reference to a SecurityGroup in vpc to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a SecurityGroup in vpc to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zoneId: + description: The availability zone where the virtual machine + will be created. If it is not provided, the default provider + folder is used. + type: string + type: object + type: array + description: + description: A description of the load testing agent. + type: string + folderId: + description: The ID of the folder that the resources belong to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the agent. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the load testing agent. Must be unique + within folder. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.computeInstance is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.computeInstance) + || (has(self.initProvider) && has(self.initProvider.computeInstance))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: AgentStatus defines the observed state of Agent. + properties: + atProvider: + properties: + computeInstance: + description: The template for creating new compute instance running + load testing agent. The structure is documented below. + items: + properties: + bootDisk: + description: Boot disk specifications for the instance. + The structure is documented below. 
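+                          # An illustrative sketch (not generator output): a minimal
+                          # computeInstance block as it would be written under
+                          # spec.forProvider, using the fields this schema describes
+                          # (all values are hypothetical):
+                          #   computeInstance:
+                          #     - bootDisk:
+                          #         - initializeParams:
+                          #             - size: 30
+                          #       resources:
+                          #         - cores: 2
+                          #           memory: 2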
+                          items:
+                            properties:
+                              autoDelete:
+                                description: Whether the disk is auto-deleted when
+                                  the instance is deleted. The default value is true.
+                                type: boolean
+                              deviceName:
+                                description: This value can be used to reference the
+                                  device under /dev/disk/by-id/.
+                                type: string
+                              diskId:
+                                description: (Computed) The ID of the created disk.
+                                type: string
+                              initializeParams:
+                                description: Parameters for creating a disk alongside
+                                  the instance. The structure is documented below.
+                                items:
+                                  properties:
+                                    blockSize:
+                                      description: Block size of the disk, specified
+                                        in bytes.
+                                      type: number
+                                    description:
+                                      description: A description of the boot disk.
+                                      type: string
+                                    name:
+                                      description: The name of the boot disk. Must
+                                        be unique within the folder.
+                                      type: string
+                                    size:
+                                      description: The size of the disk in GB. Defaults
+                                        to 15 GB.
+                                      type: number
+                                    type:
+                                      description: The disk type.
+                                      type: string
+                                  type: object
+                                type: array
+                            type: object
+                          type: array
+                        computedLabels:
+                          additionalProperties:
+                            type: string
+                          description: (Computed) The set of key:value label pairs
+                            assigned to this instance. This includes user custom labels
+                            and predefined items created by Yandex Cloud Load Testing.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        computedMetadata:
+                          additionalProperties:
+                            type: string
+                          description: (Computed) The set of key:value metadata pairs
+                            assigned to this instance. This includes user custom metadata
+                            and predefined items created by Yandex Cloud Load Testing.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        labels:
+                          additionalProperties:
+                            type: string
+                          description: A set of key/value label pairs to assign to
+                            the instance.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        metadata:
+                          additionalProperties:
+                            type: string
+                          description: A set of metadata key/value pairs to make available
+                            from within the instance.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        networkInterface:
+                          description: Network specifications for the instance. This
+                            can be used multiple times for adding multiple interfaces.
+                            The structure is documented below.
+                          items:
+                            properties:
+                              index:
+                                type: number
+                              ipAddress:
+                                description: Manually set static IP address.
+                                type: string
+                              ipv4:
+                                description: Flag for allocating an IPv4 address for
+                                  the network interface.
+                                type: boolean
+                              ipv6:
+                                description: Flag for allocating an IPv6 address for
+                                  the network interface.
+                                type: boolean
+                              ipv6Address:
+                                description: Manually set static IPv6 address.
+                                type: string
+                              macAddress:
+                                type: string
+                              nat:
+                                description: Flag for using NAT.
+                                type: boolean
+                              natIpAddress:
+                                description: A public address that can be used to
+                                  access the internet over NAT.
+                                type: string
+                              natIpVersion:
+                                type: string
+                              securityGroupIds:
+                                description: Security group IDs for the network interface.
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: set
+                              subnetId:
+                                description: The ID of the subnet to attach this interface
+                                  to. The subnet must reside in the same zone where
+                                  this instance was created.
+                                type: string
+                            type: object
+                          type: array
+                        platformId:
+                          description: The Compute platform of the virtual machine.
+                            If it is not provided, the standard-v2 platform will be
+                            used.
+                          type: string
+                        resources:
+                          description: Compute resource specifications for the instance.
+                            The structure is documented below.
+                          items:
+                            properties:
+                              coreFraction:
+                                description: If provided, specifies baseline core
+                                  performance as a percent.
+                                type: number
+                              cores:
+                                description: The number of CPU cores for the instance.
+                                  Defaults to 2 cores.
+                                type: number
+                              memory:
+                                description: The memory size in GB. Defaults to 2
+                                  GB.
+                                type: number
+                            type: object
+                          type: array
+                        serviceAccountId:
+                          description: The ID of the service account authorized for
+                            this load testing agent. The service account should have
+                            the loadtesting.generatorClient or loadtesting.externalAgent
+                            role in the folder.
+                          type: string
+                        zoneId:
+                          description: The availability zone where the virtual machine
+                            will be created. If it is not provided, the default provider
+                            zone is used.
+                          type: string
+                      type: object
+                    type: array
+                  computeInstanceId:
+                    type: string
+                  description:
+                    description: A description of the load testing agent.
+                    type: string
+                  folderId:
+                    description: The ID of the folder that the resources belong to.
+                    type: string
+                  id:
+                    type: string
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: A set of key/value label pairs to assign to the agent.
+                    type: object
+                    x-kubernetes-map-type: granular
+                  name:
+                    description: The name of the load testing agent. Must be unique
+                      within the folder.
+                    type: string
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      type: integer
+                    reason:
+                      description: A Reason for this condition's last transition from
+                        one status to another.
+                      type: string
+                    status:
+                      description: Status of this condition; is it currently True,
+                        False, or Unknown?
+                      type: string
+                    type:
+                      description: |-
+                        Type of this condition. At most one of each condition type may apply to
+                        a resource at any point in time.
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+              observedGeneration:
+                description: |-
+                  ObservedGeneration is the latest metadata.generation
+                  which resulted in either a ready state, or stalled due to error
+                  it can not recover from without human intervention.
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secretiambindings.yaml b/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secretiambindings.yaml
new file mode 100644
index 0000000..d9997cf
--- /dev/null
+++ b/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secretiambindings.yaml
@@ -0,0 +1,683 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: secretiambindings.lockbox.yandex-cloud.upjet.crossplane.io
+spec:
+  group: lockbox.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: SecretIAMBinding
+    listKind: SecretIAMBindingList
+    plural: secretiambindings
+    singular: secretiambinding
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: SecretIAMBinding is the Schema for the SecretIAMBindings API.
+          Allows management of a single IAM binding for a Yandex Cloud Lockbox secret.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SecretIAMBindingSpec defines the desired state of SecretIAMBinding
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  members:
+                    description: 'Identities that will be granted the privilege in
+                      the role. Each entry can have one of the following values:'
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: set
+                  role:
+                    description: The role that should be applied. See roles.
+                    type: string
+                  secretId:
+                    description: The Yandex Cloud Lockbox secret ID to apply a binding
+                      to.
+                    type: string
+                  secretIdRef:
+                    description: Reference to a Secret to populate secretId.
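+                    # An illustrative sketch (not generator output): resolving
+                    # secretId from a managed Lockbox Secret object in the same
+                    # cluster (the object name "my-lockbox-secret" is hypothetical):
+                    #   secretIdRef:
+                    #     name: my-lockbox-secret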
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretIdSelector: + description: Selector for a Secret to populate secretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
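+                        # An illustrative sketch (not generator output): selecting
+                        # ServiceAccount objects by label to populate members (the
+                        # label key and value below are hypothetical):
+                        #   serviceAccountSelector:
+                        #     matchLabels:
+                        #       team: loadtesting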
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + secretId: + description: The Yandex Lockbox Secret Secret ID to apply a binding + to. + type: string + secretIdRef: + description: Reference to a Secret to populate secretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretIdSelector: + description: Selector for a Secret to populate secretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
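+                      # An illustrative sketch (not generator output): publishing
+                      # connection details to a named secret through the default
+                      # secret store config (the secret name is hypothetical):
+                      #   publishConnectionDetailsTo:
+                      #     name: secretiambinding-conn-details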
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: SecretIAMBindingStatus defines the observed state of SecretIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + secretId: + description: The Yandex Lockbox Secret Secret ID to apply a binding + to. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secrets.yaml b/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secrets.yaml new file mode 100644 index 0000000..3a50c4c --- /dev/null +++ b/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secrets.yaml @@ -0,0 +1,824 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: secrets.lockbox.yandex-cloud.upjet.crossplane.io +spec: + group: lockbox.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Secret + listKind: SecretList + plural: secrets + singular: secret + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Secret is the Schema for the Secrets API. Manages Yandex Cloud + Lockbox secret. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecretSpec defines the desired state of Secret + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deletionProtection: + description: Whether the Yandex Cloud Lockbox secret is protected + from deletion. + type: boolean + description: + description: A description for the Yandex Cloud Lockbox secret. 
+ type: string + folderId: + description: ID of the folder that the Yandex Cloud Lockbox secret + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKeyId: + description: The KMS key used to encrypt the Yandex Cloud Lockbox + secret. + type: string + kmsKeyIdRef: + description: Reference to a SymmetricKey in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a SymmetricKey in kms to populate kmsKeyId. 
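+                    # An illustrative sketch (not generator output): encrypting
+                    # the secret with a managed KMS SymmetricKey referenced by
+                    # object name (the name "my-kms-key" is hypothetical):
+                    #   kmsKeyIdRef:
+                    #     name: my-kms-key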
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Lockbox secret. + type: object + x-kubernetes-map-type: granular + name: + description: Name for the Yandex Cloud Lockbox secret. + type: string + passwordPayloadSpecification: + description: Payload specification for password generation. + items: + properties: + excludedPunctuation: + description: String of punctuation characters to exclude + from the default. Requires include_punctuation = true. + Default is empty. + type: string + includeDigits: + description: Use digits in the generated password. Default + is true. + type: boolean + includeLowercase: + description: Use lowercase letters in the generated password. + Default is true. + type: boolean + includePunctuation: + description: Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) + in the generated password. Default is true. + type: boolean + includeUppercase: + description: Use capital letters in the generated password. + Default is true. + type: boolean + includedPunctuation: + description: String of specific punctuation characters to + use. Requires include_punctuation = true. Default is empty. + type: string + length: + description: Length of generated password. Default is 36. + type: number + passwordKey: + description: The key with which the generated password will + be placed in the secret version. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deletionProtection: + description: Whether the Yandex Cloud Lockbox secret is protected + from deletion. + type: boolean + description: + description: A description for the Yandex Cloud Lockbox secret. 
+ type: string + folderId: + description: ID of the folder that the Yandex Cloud Lockbox secret + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKeyId: + description: The KMS key used to encrypt the Yandex Cloud Lockbox + secret. + type: string + kmsKeyIdRef: + description: Reference to a SymmetricKey in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a SymmetricKey in kms to populate kmsKeyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Lockbox secret. + type: object + x-kubernetes-map-type: granular + name: + description: Name for the Yandex Cloud Lockbox secret. + type: string + passwordPayloadSpecification: + description: Payload specification for password generation. + items: + properties: + excludedPunctuation: + description: String of punctuation characters to exclude + from the default. Requires include_punctuation = true. + Default is empty. + type: string + includeDigits: + description: Use digits in the generated password. Default + is true. + type: boolean + includeLowercase: + description: Use lowercase letters in the generated password. + Default is true. + type: boolean + includePunctuation: + description: Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) + in the generated password. Default is true. + type: boolean + includeUppercase: + description: Use capital letters in the generated password. + Default is true. + type: boolean + includedPunctuation: + description: String of specific punctuation characters to + use. Requires include_punctuation = true. Default is empty. + type: string + length: + description: Length of generated password. Default is 36. + type: number + passwordKey: + description: The key with which the generated password will + be placed in the secret version. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SecretStatus defines the observed state of Secret. + properties: + atProvider: + properties: + createdAt: + description: The Yandex Cloud Lockbox secret creation timestamp. + type: string + deletionProtection: + description: Whether the Yandex Cloud Lockbox secret is protected + from deletion. + type: boolean + description: + description: A description for the Yandex Cloud Lockbox secret. + type: string + folderId: + description: ID of the folder that the Yandex Cloud Lockbox secret + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + id: + type: string + kmsKeyId: + description: The KMS key used to encrypt the Yandex Cloud Lockbox + secret. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Lockbox secret. + type: object + x-kubernetes-map-type: granular + name: + description: Name for the Yandex Cloud Lockbox secret. + type: string + passwordPayloadSpecification: + description: Payload specification for password generation. + items: + properties: + excludedPunctuation: + description: String of punctuation characters to exclude + from the default. Requires include_punctuation = true. + Default is empty. + type: string + includeDigits: + description: Use digits in the generated password. Default + is true. + type: boolean + includeLowercase: + description: Use lowercase letters in the generated password. + Default is true. + type: boolean + includePunctuation: + description: Use punctuations (!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`) + in the generated password. Default is true. + type: boolean + includeUppercase: + description: Use capital letters in the generated password. + Default is true. + type: boolean + includedPunctuation: + description: String of specific punctuation characters to + use. Requires include_punctuation = true. Default is empty. + type: string + length: + description: Length of generated password. Default is 36. + type: number + passwordKey: + description: The key with which the generated password will + be placed in the secret version. 
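+                          # An illustrative sketch (not generator output): the same
+                          # block as written in spec.forProvider, generating a
+                          # 24-character password with no punctuation (the key name
+                          # is hypothetical):
+                          #   passwordPayloadSpecification:
+                          #     - passwordKey: password
+                          #       length: 24
+                          #       includePunctuation: false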
+ type: string + type: object + type: array + status: + description: The Yandex Cloud Lockbox secret status. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secretversions.yaml b/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secretversions.yaml new file mode 100644 index 0000000..c965501 --- /dev/null +++ b/package/crds/lockbox.yandex-cloud.upjet.crossplane.io_secretversions.yaml @@ -0,0 +1,631 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: secretversions.lockbox.yandex-cloud.upjet.crossplane.io +spec: + group: lockbox.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SecretVersion + listKind: SecretVersionList + plural: secretversions + singular: secretversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SecretVersion is the Schema for the SecretVersions API. Manages + Yandex Cloud Lockbox secret version. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecretVersionSpec defines the desired state of SecretVersion + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The Yandex Cloud Lockbox secret version description. + type: string + entries: + description: List of entries in the Yandex Cloud Lockbox secret + version. Must be omitted for secrets with a payload specification. + items: + properties: + command: + description: The command that generates the text value of + the entry. + items: + properties: + args: + description: List of arguments to be passed to the + script/command. + items: + type: string + type: array + env: + additionalProperties: + type: string + description: Map of environment variables to set before + calling the script/command. + type: object + x-kubernetes-map-type: granular + path: + description: The path to the script or command to + execute. + type: string + type: object + type: array + key: + description: The key of the entry. + type: string + textValueSecretRef: + description: The text value of the entry. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + secretId: + description: The Yandex Cloud Lockbox secret ID where to add the + version. + type: string + secretIdRef: + description: Reference to a Secret to populate secretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
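+          # Illustrative (hypothetical) SecretVersion manifest sketching how the
+          # forProvider fields above fit together; every name, namespace and key
+          # below is a placeholder, not a value from this schema:
+          #
+          #   apiVersion: lockbox.yandex-cloud.upjet.crossplane.io/v1alpha1
+          #   kind: SecretVersion
+          #   metadata:
+          #     name: example-version
+          #   spec:
+          #     forProvider:
+          #       secretIdRef:
+          #         name: example-lockbox-secret
+          #       entries:
+          #         - key: password
+          #           textValueSecretRef:
+          #             name: app-credentials
+          #             namespace: default
+          #             key: password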
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretIdSelector: + description: Selector for a Secret to populate secretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The Yandex Cloud Lockbox secret version description. + type: string + entries: + description: List of entries in the Yandex Cloud Lockbox secret + version. Must be omitted for secrets with a payload specification. + items: + properties: + command: + description: The command that generates the text value of + the entry. + items: + properties: + args: + description: List of arguments to be passed to the + script/command. + items: + type: string + type: array + env: + additionalProperties: + type: string + description: Map of environment variables to set before + calling the script/command. + type: object + x-kubernetes-map-type: granular + path: + description: The path to the script or command to + execute. + type: string + type: object + type: array + key: + description: The key of the entry. + type: string + textValueSecretRef: + description: The text value of the entry. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + secretId: + description: The Yandex Cloud Lockbox secret ID where to add the + version. + type: string + secretIdRef: + description: Reference to a Secret to populate secretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
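+          # A hypothetical alternative to secretIdRef: selecting the referenced
+          # Secret by labels via secretIdSelector, as described above (label
+          # values are placeholders):
+          #
+          #   spec:
+          #     forProvider:
+          #       secretIdSelector:
+          #         matchControllerRef: false
+          #         matchLabels:
+          #           app: example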
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretIdSelector: + description: Selector for a Secret to populate secretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
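+          # Sketch of an observe-only resource using the beta ManagementPolicies
+          # field described above (requires the feature flag to be enabled; the
+          # policy set is chosen purely for illustration):
+          #
+          #   spec:
+          #     managementPolicies: ["Observe"]
+          #     providerConfigRef:
+          #       name: default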
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SecretVersionStatus defines the observed state of SecretVersion. + properties: + atProvider: + properties: + description: + description: The Yandex Cloud Lockbox secret version description. + type: string + entries: + description: List of entries in the Yandex Cloud Lockbox secret + version. Must be omitted for secrets with a payload specification. + items: + properties: + command: + description: The command that generates the text value of + the entry. + items: + properties: + args: + description: List of arguments to be passed to the + script/command. + items: + type: string + type: array + env: + additionalProperties: + type: string + description: Map of environment variables to set before + calling the script/command. + type: object + x-kubernetes-map-type: granular + path: + description: The path to the script or command to + execute. + type: string + type: object + type: array + key: + description: The key of the entry. + type: string + type: object + type: array + id: + type: string + secretId: + description: The Yandex Cloud Lockbox secret ID where to add the + version. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
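+          # Hypothetical connection-secret wiring using the
+          # writeConnectionSecretToRef field described above (both names are
+          # placeholders):
+          #
+          #   spec:
+          #     writeConnectionSecretToRef:
+          #       name: secretversion-conn
+          #       namespace: crossplane-system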
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/logging.yandex-cloud.upjet.crossplane.io_groups.yaml b/package/crds/logging.yandex-cloud.upjet.crossplane.io_groups.yaml new file mode 100644 index 0000000..ac890b8 --- /dev/null +++ b/package/crds/logging.yandex-cloud.upjet.crossplane.io_groups.yaml @@ -0,0 +1,561 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: groups.logging.yandex-cloud.upjet.crossplane.io +spec: + group: logging.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Group + listKind: GroupList + plural: groups + singular: group + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Group is the Schema for the Groups API. Manages Yandex Cloud + Logging group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupSpec defines the desired state of Group + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataStream: + type: string + description: + description: A description for the Yandex Cloud Logging group. + type: string + folderId: + description: ID of the folder that the Yandex Cloud Logging group + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Logging group. + type: object + x-kubernetes-map-type: granular + name: + description: Name for the Yandex Cloud Logging group. + type: string + retentionPeriod: + description: Log entries retention period for the Yandex Cloud + Logging group. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataStream: + type: string + description: + description: A description for the Yandex Cloud Logging group. + type: string + folderId: + description: ID of the folder that the Yandex Cloud Logging group + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. 
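+          # Illustrative (hypothetical) Group manifest combining the forProvider
+          # fields described above; the folder reference name, retention value
+          # and labels are placeholders:
+          #
+          #   apiVersion: logging.yandex-cloud.upjet.crossplane.io/v1alpha1
+          #   kind: Group
+          #   metadata:
+          #     name: example-log-group
+          #   spec:
+          #     forProvider:
+          #       name: example-log-group
+          #       folderIdRef:
+          #         name: example-folder
+          #       retentionPeriod: 72h0m0s
+          #       labels:
+          #         env: dev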
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Logging group. + type: object + x-kubernetes-map-type: granular + name: + description: Name for the Yandex Cloud Logging group. + type: string + retentionPeriod: + description: Log entries retention period for the Yandex Cloud + Logging group. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: GroupStatus defines the observed state of Group. + properties: + atProvider: + properties: + cloudId: + description: ID of the cloud that the Yandex Cloud Logging group + belong to. + type: string + createdAt: + description: The Yandex Cloud Logging group creation timestamp. + type: string + dataStream: + type: string + description: + description: A description for the Yandex Cloud Logging group. + type: string + folderId: + description: ID of the folder that the Yandex Cloud Logging group + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + id: + description: The Yandex Cloud Logging group ID. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Logging group. + type: object + x-kubernetes-map-type: granular + name: + description: Name for the Yandex Cloud Logging group. + type: string + retentionPeriod: + description: Log entries retention period for the Yandex Cloud + Logging group. + type: string + status: + description: The Yandex Cloud Logging group status. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_clickhouseclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_clickhouseclusters.yaml new file mode 100644 index 0000000..cfd0cf8 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_clickhouseclusters.yaml @@ -0,0 +1,6051 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clickhouseclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ClickhouseCluster + listKind: ClickhouseClusterList + plural: clickhouseclusters + singular: clickhousecluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClickhouseCluster is the Schema for the ClickhouseClusters API. + Manages a ClickHouse cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClickhouseClusterSpec defines the desired state of ClickhouseCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + access: + description: Access policy to the ClickHouse cluster. The structure + is documented below. 
+ items: + properties: + dataLens: + description: Allow access for DataLens. Can be either true + or false. + type: boolean + dataTransfer: + description: Allow access for DataTransfer. Can be either + true or false. + type: boolean + metrika: + description: Allow access for Yandex.Metrika. Can be either + true or false. + type: boolean + serverless: + description: Allow access for Serverless. Can be either + true or false. + type: boolean + webSql: + description: Allow access for Web SQL. Can be either true + or false. + type: boolean + yandexQuery: + description: Allow access for YandexQuery. Can be either + true or false. + type: boolean + type: object + type: array + adminPasswordSecretRef: + description: A password used to authorize as user admin when sql_user_management + enabled. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + backupRetainPeriodDays: + description: The period in days during which backups are stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC timezone. + The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + clickhouse: + description: Configuration of the ClickHouse subcluster. The structure + is documented below. + items: + properties: + config: + description: Main ClickHouse cluster configuration. + items: + properties: + asynchronousInsertLogEnabled: + type: boolean + asynchronousInsertLogRetentionSize: + type: number + asynchronousInsertLogRetentionTime: + type: number + asynchronousMetricLogEnabled: + type: boolean + asynchronousMetricLogRetentionSize: + type: number + asynchronousMetricLogRetentionTime: + type: number + backgroundBufferFlushSchedulePoolSize: + type: number + backgroundCommonPoolSize: + type: number + backgroundDistributedSchedulePoolSize: + type: number + backgroundFetchesPoolSize: + type: number + backgroundMergesMutationsConcurrencyRatio: + type: number + backgroundMessageBrokerSchedulePoolSize: + type: number + backgroundMovePoolSize: + type: number + backgroundPoolSize: + type: number + backgroundSchedulePoolSize: + type: number + compression: + description: Data compression configuration. The structure + is documented below. + items: + properties: + level: + description: Compression level for ZSTD method. + type: number + method: + description: 'Method: Compression method. Two + methods are available: LZ4 and zstd.' + type: string + minPartSize: + description: 'Min part size: Minimum size (in + bytes) of a data part in a table. ClickHouse + only applies the rule to tables with data + parts greater than or equal to the Min part + size value.' + type: number + minPartSizeRatio: + description: 'Min part size ratio: Minimum table + part size to total table size ratio. ClickHouse + only applies the rule to tables in which this + ratio is greater than or equal to the Min + part size ratio value.' + type: number + type: object + type: array + defaultDatabase: + description: A database of the ClickHouse cluster. + The structure is documented below. 
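+          # Hypothetical fragment of a ClickhouseCluster spec exercising the
+          # access, backup window and compression settings described above
+          # (all values are placeholders, not recommendations):
+          #
+          #   spec:
+          #     forProvider:
+          #       access:
+          #         - dataLens: true
+          #           webSql: false
+          #       backupWindowStart:
+          #         - hours: 3
+          #           minutes: 0
+          #       clickhouse:
+          #         - config:
+          #             - compression:
+          #                 - method: LZ4
+          #                   minPartSize: 1024
+          #                   minPartSizeRatio: 0.5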
+                                  type: string
+                                dictionariesLazyLoad:
+                                  type: boolean
+                                geobaseEnabled:
+                                  type: boolean
+                                geobaseUri:
+                                  type: string
+                                graphiteRollup:
+                                  description: Graphite rollup configuration. The
+                                    structure is documented below.
+                                  items:
+                                    properties:
+                                      name:
+                                        description: The name of the rollup configuration.
+                                        type: string
+                                      pathColumnName:
+                                        description: 'The name of the column storing
+                                          the metric name (Graphite sensor). Default
+                                          value: Path.'
+                                        type: string
+                                      pattern:
+                                        description: Set of thinning rules.
+                                        items:
+                                          properties:
+                                            function:
+                                              description: Aggregation function name.
+                                              type: string
+                                            regexp:
+                                              description: Regular expression that
+                                                the metric name must match.
+                                              type: string
+                                            retention:
+                                              description: Retain parameters.
+                                              items:
+                                                properties:
+                                                  age:
+                                                    description: Minimum data age
+                                                      in seconds.
+                                                    type: number
+                                                  precision:
+                                                    description: Accuracy of determining
+                                                      the age of the data in seconds.
+                                                    type: number
+                                                type: object
+                                              type: array
+                                          type: object
+                                        type: array
+                                      timeColumnName:
+                                        description: 'The name of the column storing
+                                          the time of measuring the metric. Default
+                                          value: Time.'
+                                        type: string
+                                      valueColumnName:
+                                        description: 'The name of the column storing
+                                          the value of the metric at the time set
+                                          in time_column_name. Default value: Value.'
+                                        type: string
+                                      versionColumnName:
+                                        description: 'The name of the column storing
+                                          the version of the metric. Default value:
+                                          Timestamp.'
+                                        type: string
+                                    type: object
+                                  type: array
+                                kafka:
+                                  description: Kafka connection configuration. The
+                                    structure is documented below.
+                                  items:
+                                    properties:
+                                      autoOffsetReset:
+                                        description: 'Action to take when there is
+                                          no initial offset in offset store or the
+                                          desired offset is out of range: ''smallest'',''earliest''
+                                          - automatically reset the offset to the
+                                          smallest offset, ''largest'',''latest''
+                                          - automatically reset the offset to the
+                                          largest offset, ''error'' - trigger an
+                                          error (ERR__AUTO_OFFSET_RESET) which is
+                                          retrieved by consuming messages and checking
+                                          ''message->err''.'
+                                        type: string
+                                      debug:
+                                        description: A comma-separated list of debug
+                                          contexts to enable.
+                                        type: string
+                                      enableSslCertificateVerification:
+                                        description: Enable verification of SSL certificates.
+                                        type: boolean
+                                      maxPollIntervalMs:
+                                        description: Maximum allowed time between
+                                          calls to consume messages (e.g., rd_kafka_consumer_poll())
+                                          for high-level consumers. If this interval
+                                          is exceeded, the consumer is considered
+                                          failed and the group will rebalance in
+                                          order to reassign the partitions to another
+                                          consumer group member.
+                                        type: number
+                                      saslMechanism:
+                                        description: SASL mechanism used in Kafka
+                                          authentication.
+                                        type: string
+                                      saslPasswordSecretRef:
+                                        description: User password on the Kafka server.
+                                        properties:
+                                          key:
+                                            description: The key to select.
+                                            type: string
+                                          name:
+                                            description: Name of the secret.
+                                            type: string
+                                          namespace:
+                                            description: Namespace of the secret.
+                                            type: string
+                                        required:
+                                        - key
+                                        - name
+                                        - namespace
+                                        type: object
+                                      saslUsername:
+                                        description: Username on the Kafka server.
+                                        type: string
+                                      securityProtocol:
+                                        description: Security protocol used to connect
+                                          to the Kafka server.
+                                        type: string
+                                      sessionTimeoutMs:
+                                        description: Client group session and failure
+                                          detection timeout. The consumer sends periodic
+                                          heartbeats (heartbeat.interval.ms) to indicate
+                                          its liveness to the broker. If no heartbeats
+                                          are received by the broker for a group
+                                          member within the session timeout, the
+                                          broker will remove the consumer from the
+                                          group and trigger a rebalance.
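+          # Hypothetical kafka block matching the schema above; the mechanism and
+          # protocol values, and the referenced Kubernetes Secret holding the SASL
+          # password, are placeholders:
+          #
+          #   kafka:
+          #     - saslMechanism: SCRAM-SHA-512
+          #       saslUsername: example-user
+          #       securityProtocol: SASL_SSL
+          #       saslPasswordSecretRef:
+          #         name: kafka-credentials
+          #         namespace: default
+          #         key: password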
+                                        type: number
+                                    type: object
+                                  type: array
+                                kafkaTopic:
+                                  description: Kafka topic connection configuration.
+                                    The structure is documented below.
+                                  items:
+                                    properties:
+                                      name:
+                                        description: The name of the topic.
+                                        type: string
+                                      settings:
+                                        description: Custom connection settings for
+                                          the topic. The list is documented below.
+                                        items:
+                                          properties:
+                                            autoOffsetReset:
+                                              description: 'Action to take when there
+                                                is no initial offset in offset store
+                                                or the desired offset is out of range:
+                                                ''smallest'',''earliest'' - automatically
+                                                reset the offset to the smallest
+                                                offset, ''largest'',''latest'' -
+                                                automatically reset the offset to
+                                                the largest offset, ''error'' - trigger
+                                                an error (ERR__AUTO_OFFSET_RESET)
+                                                which is retrieved by consuming messages
+                                                and checking ''message->err''.'
+                                              type: string
+                                            debug:
+                                              description: A comma-separated list
+                                                of debug contexts to enable.
+                                              type: string
+                                            enableSslCertificateVerification:
+                                              description: Enable verification of
+                                                SSL certificates.
+                                              type: boolean
+                                            maxPollIntervalMs:
+                                              description: Maximum allowed time between
+                                                calls to consume messages (e.g.,
+                                                rd_kafka_consumer_poll()) for high-level
+                                                consumers. If this interval is exceeded,
+                                                the consumer is considered failed
+                                                and the group will rebalance in order
+                                                to reassign the partitions to another
+                                                consumer group member.
+                                              type: number
+                                            saslMechanism:
+                                              description: SASL mechanism used in
+                                                Kafka authentication.
+                                              type: string
+                                            saslPasswordSecretRef:
+                                              description: User password on the Kafka
+                                                server.
+                                              properties:
+                                                key:
+                                                  description: The key to select.
+                                                  type: string
+                                                name:
+                                                  description: Name of the secret.
+                                                  type: string
+                                                namespace:
+                                                  description: Namespace of the secret.
+                                                  type: string
+                                              required:
+                                              - key
+                                              - name
+                                              - namespace
+                                              type: object
+                                            saslUsername:
+                                              description: Username on the Kafka
+                                                server.
+                                              type: string
+                                            securityProtocol:
+                                              description: Security protocol used
+                                                to connect to the Kafka server.
+                                              type: string
+                                            sessionTimeoutMs:
+                                              description: Client group session and
+                                                failure detection timeout. The consumer
+                                                sends periodic heartbeats (heartbeat.interval.ms)
+                                                to indicate its liveness to the broker.
+                                                If no heartbeats are received by
+                                                the broker for a group member within
+                                                the session timeout, the broker will
+                                                remove the consumer from the group
+                                                and trigger a rebalance.
+                                              type: number
+                                          type: object
+                                        type: array
+                                    type: object
+                                  type: array
+                                keepAliveTimeout:
+                                  type: number
+                                logLevel:
+                                  description: Logging level for the ClickHouse
+                                    server. For more information, see the official
+                                    documentation.
+                                  type: string
+                                markCacheSize:
+                                  type: number
+                                maxConcurrentQueries:
+                                  type: number
+                                maxConnections:
+                                  type: number
+                                maxPartitionSizeToDrop:
+                                  type: number
+                                maxTableSizeToDrop:
+                                  type: number
+                                mergeTree:
+                                  description: MergeTree engine configuration. The
+                                    structure is documented below.
+                                  items:
+                                    properties:
+                                      allowRemoteFsZeroCopyReplication:
+                                        description: When this setting has a value
+                                          greater than zero, only a single replica
+                                          starts the merge immediately if the merged
+                                          part is on shared storage and allow_remote_fs_zero_copy_replication
+                                          is enabled.
+                                        type: boolean
+                                      checkSampleColumnIsCorrect:
+                                        description: 'Enables the check at table
+                                          creation, that the data type of a column
+                                          for sampling or sampling expression is
+                                          correct. The data type must be one of
+                                          unsigned integer types: UInt8, UInt16,
+                                          UInt32, UInt64. Default value: true.'
+                                        type: boolean
+                                      cleanupDelayPeriod:
+                                        description: Minimum period to clean old
+                                          queue logs, blocks hashes and parts.
+                                        type: number
+                                      inactivePartsToDelayInsert:
+                                        description: If the number of inactive parts
+                                          in a single partition in the table reaches
+                                          at least the inactive_parts_to_delay_insert
+                                          value, an INSERT is artificially slowed
+                                          down. It is useful when a server fails
+                                          to clean up parts quickly enough.
+                                        type: number
+                                      inactivePartsToThrowInsert:
+                                        description: If the number of inactive parts
+                                          in a single partition is more than the
+                                          inactive_parts_to_throw_insert value, INSERT
+                                          is interrupted with the "Too many inactive
+                                          parts (N). Parts cleaning are processing
+                                          significantly slower than inserts" exception.
+                                        type: number
+                                      maxAvgPartSizeForTooManyParts:
+                                        description: The too many parts check according
+                                          to parts_to_delay_insert and parts_to_throw_insert
+                                          will be active only if the average part
+                                          size (in the relevant partition) is not
+                                          larger than the specified threshold. If
+                                          it is larger than the specified threshold,
+                                          the INSERTs will be neither delayed nor
+                                          rejected. This makes it possible to have
+                                          hundreds of terabytes in a single table
+                                          on a single server if the parts are successfully
+                                          merged into larger parts. This does not
+                                          affect the thresholds on inactive parts
+                                          or total parts.
+                                        type: number
+                                      maxBytesToMergeAtMaxSpaceInPool:
+                                        description: The maximum total part size
+                                          (in bytes) to be merged into one part,
+                                          if there are enough resources available.
+                                          max_bytes_to_merge_at_max_space_in_pool
+                                          roughly corresponds to the maximum possible
+                                          part size created by an automatic background
+                                          merge.
+                                        type: number
+                                      maxBytesToMergeAtMinSpaceInPool:
+                                        description: 'Max bytes to merge at min space
+                                          in pool: Maximum total size of a data part
+                                          to merge when the number of free threads
+                                          in the background pool is at its minimum.'
+                                        type: number
+                                      maxCleanupDelayPeriod:
+                                        description: 'Maximum period to clean old
+                                          queue logs, blocks hashes and parts. Default
+                                          value: 300 seconds.'
+                                        type: number
+                                      maxMergeSelectingSleepMs:
+                                        description: 'Maximum sleep time for merge
+                                          selecting. A lower setting will trigger
+                                          selecting tasks in background_schedule_pool
+                                          frequently, which results in a large number
+                                          of requests to ZooKeeper in large-scale
+                                          clusters. Default value: 60000 milliseconds
+                                          (60 seconds).'
+                                        type: number
+                                      maxNumberOfMergesWithTtlInPool:
+                                        description: When there are more than the
+                                          specified number of merges with TTL entries
+                                          in the pool, do not assign a new merge
+                                          with TTL.
+                                        type: number
+                                      maxPartsInTotal:
+                                        description: Maximum number of parts in all
+                                          partitions.
+                                        type: number
+                                      maxReplicatedMergesInQueue:
+                                        description: 'Max replicated merges in queue:
+                                          Maximum number of merge tasks that can
+                                          be in the ReplicatedMergeTree queue at
+                                          the same time.'
+                                        type: number
+                                      mergeMaxBlockSize:
+                                        description: 'The number of rows that are
+                                          read from the merged parts into memory.
+                                          Default value: 8192.'
+                                        type: number
+                                      mergeSelectingSleepMs:
+                                        description: Sleep time for merge selecting
+                                          when no part is selected. A lower setting
+                                          triggers selecting tasks in background_schedule_pool
+                                          frequently, which results in a large number
+                                          of requests to ClickHouse Keeper in large-scale
+                                          clusters.
+                                        type: number
+                                      mergeWithRecompressionTtlTimeout:
+                                        description: 'Minimum delay in seconds before
+                                          repeating a merge with recompression TTL.
+                                          Default value: 14400 seconds (4 hours).'
+                                        type: number
+                                      mergeWithTtlTimeout:
+                                        description: 'Minimum delay in seconds before
+                                          repeating a merge with delete TTL. Default
+                                          value: 14400 seconds (4 hours).'
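+          # Hypothetical mergeTree tuning fragment using the settings described
+          # above (numbers are illustrative placeholders, not recommendations):
+          #
+          #   mergeTree:
+          #     - maxBytesToMergeAtMaxSpaceInPool: 161061273600
+          #       mergeWithTtlTimeout: 14400
+          #       inactivePartsToDelayInsert: 1000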
+                                        type: number
+                                      minAgeToForceMergeOnPartitionOnly:
+                                        description: Whether min_age_to_force_merge_seconds
+                                          should be applied only to the entire partition
+                                          and not to a subset.
+                                        type: boolean
+                                      minAgeToForceMergeSeconds:
+                                        description: Merge parts if every part in
+                                          the range is older than the value of min_age_to_force_merge_seconds.
+                                        type: number
+                                      minBytesForWidePart:
+                                        description: Minimum number of bytes in a
+                                          data part that can be stored in Wide format.
+                                          You can set one, both or none of these
+                                          settings.
+                                        type: number
+                                      minRowsForWidePart:
+                                        description: Minimum number of rows in a
+                                          data part that can be stored in Wide format.
+                                          You can set one, both or none of these
+                                          settings.
+                                        type: number
+                                      numberOfFreeEntriesInPoolToExecuteMutation:
+                                        description: 'When there are fewer than the
+                                          specified number of free entries in the
+                                          pool, do not execute part mutations. This
+                                          is to leave free threads for regular merges
+                                          and avoid "Too many parts". Default value:
+                                          20.'
+                                        type: number
+                                      numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge:
+                                        description: 'Number of free entries in pool
+                                          to lower max size of merge: Threshold value
+                                          of free entries in the pool. If the number
+                                          of entries in the pool falls below this
+                                          value, ClickHouse reduces the maximum size
+                                          of a data part to merge. This helps handle
+                                          small merges faster, rather than filling
+                                          the pool with lengthy merges.'
+                                        type: number
+                                      partsToDelayInsert:
+                                        description: 'Parts to delay insert: Number
+                                          of active data parts in a table, on exceeding
+                                          which ClickHouse starts artificially reducing
+                                          the rate of inserting data into the table.'
+                                        type: number
+                                      partsToThrowInsert:
+                                        description: 'Parts to throw insert: Threshold
+                                          value of active data parts in a table,
+                                          on exceeding which ClickHouse throws the
+                                          ''Too many parts ...'' exception.'
+                                        type: number
+                                      replicatedDeduplicationWindow:
+                                        description: 'Replicated deduplication window:
+                                          Number of recent hash blocks that ZooKeeper
+                                          will store (the old ones will be deleted).'
+                                        type: number
+                                      replicatedDeduplicationWindowSeconds:
+                                        description: 'Replicated deduplication window
+                                          seconds: Time during which ZooKeeper stores
+                                          the hash blocks (the old ones will be deleted).'
+                                        type: number
+                                      ttlOnlyDropParts:
+                                        description: Enables or disables complete
+                                          dropping of data parts in MergeTree tables
+                                          when all rows in a part are expired.
+                                        type: boolean
+                                    type: object
+                                  type: array
+                                metricLogEnabled:
+                                  type: boolean
+                                metricLogRetentionSize:
+                                  type: number
+                                metricLogRetentionTime:
+                                  type: number
+                                opentelemetrySpanLogEnabled:
+                                  type: boolean
+                                opentelemetrySpanLogRetentionSize:
+                                  type: number
+                                opentelemetrySpanLogRetentionTime:
+                                  type: number
+                                partLogRetentionSize:
+                                  type: number
+                                partLogRetentionTime:
+                                  type: number
+                                queryCache:
+                                  description: Query cache configuration. The structure
+                                    is documented below.
+                                  items:
+                                    properties:
+                                      maxEntries:
+                                        description: 'The maximum number of SELECT
+                                          query results stored in the cache. Default
+                                          value: 1024.'
+                                        type: number
+                                      maxEntrySizeInBytes:
+                                        description: 'The maximum size in bytes that
+                                          SELECT query results may have in order
+                                          to be saved in the cache. Default value:
+                                          1048576 (1 MiB).'
+                                        type: number
+                                      maxEntrySizeInRows:
+                                        description: 'The maximum number of rows
+                                          that SELECT query results may have in order
+                                          to be saved in the cache. Default value:
+                                          30000000 (30 million).'
+                                        type: number
+                                      maxSizeInBytes:
+                                        description: 'The maximum cache size in bytes.
+                                          0 means the query cache is disabled. Default
+                                          value: 1073741824 (1 GiB).'
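+          # Hypothetical queryCache fragment; the values mirror the documented
+          # defaults above and are illustrative only:
+          #
+          #   queryCache:
+          #     - maxEntries: 1024
+          #       maxSizeInBytes: 1073741824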
+                                        type: number
+                                    type: object
+                                  type: array
+                                queryLogRetentionSize:
+                                  type: number
+                                queryLogRetentionTime:
+                                  type: number
+                                queryMaskingRules:
+                                  description: Query masking rules configuration.
+                                    The structure is documented below.
+                                  items:
+                                    properties:
+                                      name:
+                                        description: The name of the rule.
+                                        type: string
+                                      regexp:
+                                        description: Regular expression matching
+                                          the sensitive data to be masked.
+                                        type: string
+                                      replace:
+                                        description: 'Substitution string for sensitive
+                                          data. Default value: six asterisks.'
+                                        type: string
+                                    type: object
+                                  type: array
+                                queryThreadLogEnabled:
+                                  type: boolean
+                                queryThreadLogRetentionSize:
+                                  type: number
+                                queryThreadLogRetentionTime:
+                                  type: number
+                                queryViewsLogEnabled:
+                                  type: boolean
+                                queryViewsLogRetentionSize:
+                                  type: number
+                                queryViewsLogRetentionTime:
+                                  type: number
+                                rabbitmq:
+                                  description: RabbitMQ connection configuration.
+                                    The structure is documented below.
+                                  items:
+                                    properties:
+                                      passwordSecretRef:
+                                        description: The password of the user.
+                                        properties:
+                                          key:
+                                            description: The key to select.
+                                            type: string
+                                          name:
+                                            description: Name of the secret.
+                                            type: string
+                                          namespace:
+                                            description: Namespace of the secret.
+                                            type: string
+                                        required:
+                                        - key
+                                        - name
+                                        - namespace
+                                        type: object
+                                      username:
+                                        description: RabbitMQ username.
+                                        type: string
+                                      vhost:
+                                        description: "RabbitMQ vhost. Default: ''."
+                                        type: string
+                                    type: object
+                                  type: array
+                                sessionLogEnabled:
+                                  type: boolean
+                                sessionLogRetentionSize:
+                                  type: number
+                                sessionLogRetentionTime:
+                                  type: number
+                                textLogEnabled:
+                                  type: boolean
+                                textLogLevel:
+                                  type: string
+                                textLogRetentionSize:
+                                  type: number
+                                textLogRetentionTime:
+                                  type: number
+                                timezone:
+                                  type: string
+                                totalMemoryProfilerStep:
+                                  type: number
+                                traceLogEnabled:
+                                  type: boolean
+                                traceLogRetentionSize:
+                                  type: number
+                                traceLogRetentionTime:
+                                  type: number
+                                uncompressedCacheSize:
+                                  type: number
+                                zookeeperLogEnabled:
+                                  type: boolean
+                                zookeeperLogRetentionSize:
+                                  type: number
+                                zookeeperLogRetentionTime:
+                                  type: number
+                              type: object
+                            type: array
+                          resources:
+                            description: Resources allocated to hosts of the ClickHouse
+                              subcluster. The structure is documented below.
+                            items:
+                              properties:
+                                diskSize:
+                                  description: Volume of the storage available to
+                                    a ClickHouse host, in gigabytes.
+                                  type: number
+                                diskTypeId:
+                                  description: Type of the storage of ClickHouse
+                                    hosts. For more information see the official
+                                    documentation.
+                                  type: string
+                                resourcePresetId:
+                                  type: string
+                              type: object
+                            type: array
+                        type: object
+                      type: array
+                    cloudStorage:
+                      description: Cloud storage configuration of the ClickHouse
+                        cluster. The structure is documented below.
+                      items:
+                        properties:
+                          dataCacheEnabled:
+                            description: Enables temporary storage in the cluster
+                              storage of data requested from the object storage.
+                            type: boolean
+                          dataCacheMaxSize:
+                            description: Defines the maximum amount of memory (in
+                              bytes) allocated in the cluster storage for temporary
+                              storage of data requested from the object storage.
+                            type: number
+                          enabled:
+                            description: Whether to use Yandex Object Storage for
+                              storing ClickHouse data. Can be either true or false.
+                            type: boolean
+                          moveFactor:
+                            description: Sets the minimum free space ratio in the
+                              cluster storage. If the free space is lower than this
+                              value, the data is transferred to Yandex Object Storage.
+                              Acceptable values are 0 to 1, inclusive.
+                            type: number
+                          preferNotToMerge:
+                            description: Disables merging of data parts in Yandex
+                              Object Storage.
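+          # Hypothetical cloudStorage block based on the fields above; the
+          # moveFactor value is an illustrative placeholder:
+          #
+          #   cloudStorage:
+          #     - enabled: true
+          #       moveFactor: 0.1
+          #       dataCacheEnabled: true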
+         type: boolean
+     type: object
+   type: array
+ clusterId:
+   type: string
+ copySchemaOnNewHosts:
+   description: Whether to copy schema on new ClickHouse hosts.
+   type: boolean
+ database:
+   description: A database of the ClickHouse cluster. The structure is documented below.
+   items:
+     properties:
+       name:
+         description: The name of the database.
+         type: string
+     type: object
+   type: array
+ deletionProtection:
+   description: Inhibits deletion of the cluster. Can be either true or false.
+   type: boolean
+ description:
+   description: Description of the ClickHouse cluster.
+   type: string
+ embeddedKeeper:
+   description: Whether to use ClickHouse Keeper as a coordination system and place it on the same hosts as ClickHouse. If disabled, ZooKeeper is used instead, placed on separate hosts.
+   type: boolean
+ environment:
+   description: Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
+   type: string
+ folderId:
+   description: The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
+   type: string
+ folderIdRef:
+   description: Reference to a Folder in resourcemanager to populate folderId.
+   properties:
+     name:
+       description: Name of the referenced object.
+       type: string
+     policy:
+       description: Policies for referencing.
+       properties:
+         resolution:
+           default: Required
+           description: |-
+             Resolution specifies whether resolution of this reference is required.
+             The default is 'Required', which means the reconcile will fail if the
+             reference cannot be resolved. 'Optional' means this reference will be
+             a no-op if it cannot be resolved.
+           enum:
+           - Required
+           - Optional
+           type: string
+         resolve:
+           description: |-
+             Resolve specifies when this reference should be resolved. The default
+             is 'IfNotPresent', which will attempt to resolve the reference only when
+             the corresponding field is not present. Use 'Always' to resolve the
+             reference on every reconcile.
+           enum:
+           - Always
+           - IfNotPresent
+           type: string
+       type: object
+   required:
+   - name
+   type: object
+ folderIdSelector:
+   description: Selector for a Folder in resourcemanager to populate folderId.
+   properties:
+     matchControllerRef:
+       description: |-
+         MatchControllerRef ensures an object with the same controller reference
+         as the selecting object is selected.
+       type: boolean
+     matchLabels:
+       additionalProperties:
+         type: string
+       description: MatchLabels ensures an object with matching labels is selected.
+       type: object
+     policy:
+       description: Policies for selection.
+       properties:
+         resolution:
+           default: Required
+           description: |-
+             Resolution specifies whether resolution of this reference is required.
+             The default is 'Required', which means the reconcile will fail if the
+             reference cannot be resolved. 'Optional' means this reference will be
+             a no-op if it cannot be resolved.
+           enum:
+           - Required
+           - Optional
+           type: string
+         resolve:
+           description: |-
+             Resolve specifies when this reference should be resolved. The default
+             is 'IfNotPresent', which will attempt to resolve the reference only when
+             the corresponding field is not present. Use 'Always' to resolve the
+             reference on every reconcile.
+           enum:
+           - Always
+           - IfNotPresent
+           type: string
+       type: object
+   type: object
+ formatSchema:
+   description: A set of protobuf or capnproto format schemas. The structure is documented below.
+   items:
+     properties:
+       name:
+         description: The name of the format schema.
+         type: string
+       type:
+         description: Type of the format schema.
+ type: string + uri: + description: Format schema file URL. You can only use format + schemas stored in Yandex Object Storage. + type: string + type: object + type: array + host: + description: A host of the ClickHouse cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Can be either true or false. + type: boolean + shardName: + description: The name of the shard to which the host belongs. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of the host to be deployed. Can be + either CLICKHOUSE or ZOOKEEPER. + type: string + zone: + description: The availability zone where the ClickHouse + host will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the ClickHouse + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' 
+ type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + mlModel: + description: A group of machine learning models. The structure + is documented below + items: + properties: + name: + description: The name of the ml model. + type: string + type: + description: Type of the model. + type: string + uri: + description: Model file URL. You can only use models stored + in Yandex Object Storage. + type: string + type: object + type: array + name: + description: Name of the ClickHouse cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the ClickHouse cluster + belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountId: + description: ID of the service account used for access to Yandex + Object Storage. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+       type: boolean
+     matchLabels:
+       additionalProperties:
+         type: string
+       description: MatchLabels ensures an object with matching labels is selected.
+       type: object
+     policy:
+       description: Policies for selection.
+       properties:
+         resolution:
+           default: Required
+           description: |-
+             Resolution specifies whether resolution of this reference is required.
+             The default is 'Required', which means the reconcile will fail if the
+             reference cannot be resolved. 'Optional' means this reference will be
+             a no-op if it cannot be resolved.
+           enum:
+           - Required
+           - Optional
+           type: string
+         resolve:
+           description: |-
+             Resolve specifies when this reference should be resolved. The default
+             is 'IfNotPresent', which will attempt to resolve the reference only when
+             the corresponding field is not present. Use 'Always' to resolve the
+             reference on every reconcile.
+           enum:
+           - Always
+           - IfNotPresent
+           type: string
+       type: object
+   type: object
+ shard:
+   items:
+     properties:
+       name:
+         description: The name of the shard.
+         type: string
+       resources:
+         description: Resources allocated to hosts of the shard. The resources specified for the shard take precedence over the resources specified for the cluster. The structure is documented below.
+         items:
+           properties:
+             diskSize:
+               description: Volume of the storage available to a ClickHouse host, in gigabytes.
+               type: number
+             diskTypeId:
+               description: Type of the storage of ClickHouse hosts. For more information see the official documentation.
+               type: string
+             resourcePresetId:
+               type: string
+           type: object
+         type: array
+       weight:
+         description: The weight of the shard.
+         type: number
+     type: object
+   type: array
+ shardGroup:
+   description: A group of ClickHouse shards. The structure is documented below.
+   items:
+     properties:
+       description:
+         description: Description of the shard group.
+         type: string
+       name:
+         description: The name of the shard group, used as the cluster name in Distributed tables.
+         type: string
+       shardNames:
+         description: List of shard names that belong to the shard group.
+         items:
+           type: string
+         type: array
+     type: object
+   type: array
+ sqlDatabaseManagement:
+   description: Grants the admin user database management permission.
+   type: boolean
+ sqlUserManagement:
+   description: Enables the admin user with user management permission.
+   type: boolean
+ user:
+   description: A user of the ClickHouse cluster. The structure is documented below.
+   items:
+     properties:
+       name:
+         description: The name of the user.
+         type: string
+       passwordSecretRef:
+         description: The password of the user.
+         properties:
+           key:
+             description: The key to select.
+             type: string
+           name:
+             description: Name of the secret.
+             type: string
+           namespace:
+             description: Namespace of the secret.
+             type: string
+         required:
+         - key
+         - name
+         - namespace
+         type: object
+       permission:
+         description: Set of permissions granted to the user. The structure is documented below.
+         items:
+           properties:
+             databaseName:
+               description: The name of the database that the permission grants access to.
+               type: string
+           type: object
+         type: array
+       quota:
+         description: Set of user quotas. The structure is documented below.
+         items:
+           properties:
+             errors:
+               description: The number of queries that threw an exception.
+               type: number
+             executionTime:
+               description: The total query execution time, in milliseconds (wall time).
+               type: number
+             intervalDuration:
+               description: Duration of the interval for the quota, in milliseconds.
+               type: number
+             queries:
+               description: The total number of queries.
+               type: number
+             readRows:
+               description: The total number of source rows read from tables for running the query, on all remote servers.
+               type: number
+             resultRows:
+               description: The total number of rows given as the result.
+               type: number
+           type: object
+         type: array
+       settings:
+         description: Custom settings for the user. The list is documented below.
+         items:
+           properties:
+             addHttpCorsHeader:
+               description: Include CORS headers in HTTP responses.
+               type: boolean
+             allowDdl:
+               description: Allows or denies DDL queries.
+               type: boolean
+             allowIntrospectionFunctions:
+               description: Enables introspection functions for query profiling.
+               type: boolean
+             allowSuspiciousLowCardinalityTypes:
+               description: Allows specifying the LowCardinality modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption.
+               type: boolean
+             anyJoinDistinctRightTableKeys:
+               description: Enables legacy ClickHouse server behaviour in ANY INNER|LEFT JOIN operations.
+               type: boolean
+             asyncInsert:
+               description: Enables asynchronous inserts. Disabled by default.
+               type: boolean
+             asyncInsertBusyTimeout:
+               description: 'The maximum timeout in milliseconds since the first INSERT query before inserting collected data. If the parameter is set to 0, the timeout is disabled. Default value: 200.'
+               type: number
+             asyncInsertMaxDataSize:
+               description: 'The maximum size of the unparsed data in bytes collected per query before being inserted. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 100000.'
+               type: number
+             asyncInsertStaleTimeout:
+               description: The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, the setting prolongs the async_insert_busy_timeout with every INSERT query as long as async_insert_max_data_size is not exceeded.
+               type: number
+             asyncInsertThreads:
+               description: 'The maximum number of threads for background data parsing and insertion. If the parameter is set to 0, asynchronous insertions are disabled. Default value: 16.'
+               type: number
+             cancelHttpReadonlyQueriesOnClientClose:
+               description: 'Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Default value: false.'
+               type: boolean
+             compile:
+               description: Enable compilation of queries.
+               type: boolean
+             compileExpressions:
+               description: Turn on expression compilation.
+               type: boolean
+             connectTimeout:
+               description: Connect timeout in milliseconds on the socket used for communicating with the client.
+               type: number
+             connectTimeoutWithFailover:
+               description: 'The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition. If unsuccessful, several attempts are made to connect to various replicas. Default value: 50.'
+               type: number
+             countDistinctImplementation:
+               description: Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
+               type: string
+             dateTimeInputFormat:
+               description: 'Allows choosing a parser of the text representation of date and time, one of: best_effort, basic, best_effort_us. Default value: basic. Cloud default value: best_effort.'
+               type: string
+             dateTimeOutputFormat:
+               description: 'Allows choosing different output formats of the text representation of date and time, one of: simple, iso, unix_timestamp. Default value: simple.'
+               type: string
+             deduplicateBlocksInDependentMaterializedViews:
+               description: Enables or disables the deduplication check for materialized views that receive data from Replicated* tables.
+               type: boolean
+             distinctOverflowMode:
+               description: 'Sets behaviour on overflow when using DISTINCT. Possible values:'
+               type: string
+             distributedAggregationMemoryEfficient:
+               description: Determines the behavior of distributed subqueries.
+               type: boolean
+             distributedDdlTaskTimeout:
+               description: Timeout for DDL queries, in milliseconds.
+               type: number
+             distributedProductMode:
+               description: Changes the behaviour of distributed subqueries.
+               type: string
+             emptyResultForAggregationByEmptySet:
+               description: Allows returning an empty result.
+               type: boolean
+             enableHttpCompression:
+               description: Enables or disables data compression in the response to an HTTP request.
+               type: boolean
+             fallbackToStaleReplicasForDistributedQueries:
+               description: Forces a query to an out-of-date replica if updated data is not available.
+               type: boolean
+             flattenNested:
+               description: Sets the data format of nested columns.
+               type: boolean
+             forceIndexByDate:
+               description: Disables query execution if the index can’t be used by date.
+               type: boolean
+             forcePrimaryKey:
+               description: Disables query execution if indexing by the primary key is not possible.
+               type: boolean
+             formatRegexp:
+               description: Regular expression (for Regexp format).
+               type: string
+             formatRegexpSkipUnmatched:
+               description: Skip lines unmatched by regular expression.
+               type: boolean
+             groupByOverflowMode:
+               description: 'Sets behaviour on overflow during a GROUP BY operation. Possible values:'
+               type: string
+             groupByTwoLevelThreshold:
+               description: Sets the threshold of the number of keys, after which two-level aggregation should be used.
+               type: number
+             groupByTwoLevelThresholdBytes:
+               description: Sets the threshold of the number of bytes, after which two-level aggregation should be used.
+               type: number
+             hedgedConnectionTimeoutMs:
+               description: 'Connection timeout for establishing a connection with a replica for Hedged requests. Default value: 50 milliseconds.'
+               type: number
+             httpConnectionTimeout:
+               description: Timeout for HTTP connection in milliseconds.
+               type: number
+             httpHeadersProgressInterval:
+               description: Sets the minimal interval between notifications about request progress in the HTTP header X-ClickHouse-Progress.
+               type: number
+             httpReceiveTimeout:
+               description: HTTP receive timeout in milliseconds.
+               type: number
+             httpSendTimeout:
+               description: HTTP send timeout in milliseconds.
+               type: number
+             idleConnectionTimeout:
+               description: 'Timeout to close idle TCP connections after the specified number of seconds. Default value: 3600 seconds.'
+               type: number
+             inputFormatDefaultsForOmittedFields:
+               description: When performing INSERT queries, replace omitted input column values with default values of the respective columns.
+               type: boolean
+             inputFormatImportNestedJson:
+               description: Enables or disables the insertion of JSON data with nested objects.
+               type: boolean
+             inputFormatNullAsDefault:
+               description: Enables or disables the initialization of NULL fields with default values, if the data type of these fields is not nullable.
+               type: boolean
+             inputFormatParallelParsing:
+               description: Enables or disables order-preserving parallel parsing of data formats. Supported only for the TSV, TSKV, CSV and JSONEachRow formats.
+               type: boolean
+             inputFormatValuesInterpretExpressions:
+               description: Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
+               type: boolean
+             inputFormatWithNamesUseHeader:
+               description: Enables or disables checking the column order when inserting data.
+               type: boolean
+             insertKeeperMaxRetries:
+               description: The setting sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during insert into replicated MergeTree. Only Keeper requests which failed due to network error, Keeper session timeout, or request timeout are considered for retries.
+               type: number
+             insertNullAsDefault:
+               description: 'Enables the insertion of default values instead of NULL into columns with not nullable data type. Default value: true.'
+               type: boolean
+             insertQuorum:
+               description: Enables the quorum writes.
+               type: number
+             insertQuorumParallel:
+               description: Enables or disables parallelism for quorum INSERT queries.
+               type: boolean
+             insertQuorumTimeout:
+               description: Write to a quorum timeout in milliseconds.
+               type: number
+             joinAlgorithm:
+               description: 'Specifies which JOIN algorithm is used. Possible values:'
+               items:
+                 type: string
+               type: array
+             joinOverflowMode:
+               description: 'Sets behaviour on overflow in JOIN. Possible values:'
+               type: string
+             joinUseNulls:
+               description: Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
+               type: boolean
+             joinedSubqueryRequiresAlias:
+               description: Require aliases for subselects and table functions in FROM when more than one table is present.
+               type: boolean
+             loadBalancing:
+               description: 'Specifies the algorithm of replicas selection that is used for distributed query processing, one of: random, nearest_hostname, in_order, first_or_random, round_robin. Default value: random.'
+               type: string
+             localFilesystemReadMethod:
+               description: 'Method of reading data from local filesystem. Possible values:'
+               type: string
+             logQueryThreads:
+               description: 'Setting up query threads logging. Query threads log into the system.query_thread_log table. This setting has effect only when log_queries is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the query_thread_log server configuration parameter. Default value: true.'
+               type: boolean
+             lowCardinalityAllowInNativeFormat:
+               description: Allows or restricts using the LowCardinality data type with the Native format.
+               type: boolean
+             maxAstDepth:
+               description: Maximum abstract syntax tree depth.
+               type: number
+             maxAstElements:
+               description: Maximum abstract syntax tree elements.
+               type: number
+             maxBlockSize:
+               description: A recommendation for what size of the block (in a count of rows) to load from tables.
+               type: number
+             maxBytesBeforeExternalGroupBy:
+               description: Limit in bytes for using memory for GROUP BY before using swap on disk.
+               type: number
+             maxBytesBeforeExternalSort:
+               description: This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
+               type: number
+             maxBytesInDistinct:
+               description: Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
+               type: number
+             maxBytesInJoin:
+               description: Limit on the maximum size of the hash table for JOIN, in bytes.
+               type: number
+             maxBytesInSet:
+               description: Limit on the number of bytes in the set resulting from the execution of the IN section.
+               type: number
+             maxBytesToRead:
+               description: Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
+               type: number
+             maxBytesToSort:
+               description: Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
+               type: number
+             maxBytesToTransfer:
+               description: Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
+               type: number
+             maxColumnsToRead:
+               description: Limits the maximum number of columns that can be read from a table in a single query.
+               type: number
+             maxConcurrentQueriesForUser:
+               description: 'The maximum number of concurrent requests per user. Default value: 0 (no limit).'
+               type: number
+             maxExecutionTime:
+               description: Limits the maximum query execution time in milliseconds.
+               type: number
+             maxExpandedAstElements:
+               description: Maximum abstract syntax tree elements after expansion of aliases.
+               type: number
+             maxFinalThreads:
+               description: Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier.
+               type: number
+             maxHttpGetRedirects:
+               description: Limits the maximum number of HTTP GET redirect hops for URL-engine tables.
+               type: number
+             maxInsertBlockSize:
+               description: The size of blocks (in a count of rows) to form for insertion into a table.
+               type: number
+             maxInsertThreads:
+               description: 'The maximum number of threads to execute the INSERT SELECT query. Default value: 0.'
+               type: number
+             maxMemoryUsage:
+               description: Limits the maximum memory usage (in bytes) for processing queries on a single server.
+               type: number
+             maxMemoryUsageForUser:
+               description: Limits the maximum memory usage (in bytes) for processing of a user's queries on a single server.
+               type: number
+             maxNetworkBandwidth:
+               description: Limits the speed of the data exchange over the network in bytes per second.
+               type: number
+             maxNetworkBandwidthForUser:
+               description: Limits the speed of the data exchange over the network in bytes per second.
+               type: number
+             maxParserDepth:
+               description: Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size. Zero means unlimited.
+               type: number
+             maxQuerySize:
+               description: The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
+               type: number
+             maxReadBufferSize:
+               description: The maximum size of the buffer to read from the filesystem.
+               type: number
+             maxReplicaDelayForDistributedQueries:
+               description: Disables lagging replicas for distributed queries.
+               type: number
+             maxResultBytes:
+               description: Limits the number of bytes in the result.
+               type: number
+             maxResultRows:
+               description: Limits the number of rows in the result.
+               type: number
+             maxRowsInDistinct:
+               description: Limits the maximum number of different rows when using DISTINCT.
+               type: number
+             maxRowsInJoin:
+               description: Limit on the maximum size of the hash table for JOIN, in rows.
+               type: number
+             maxRowsInSet:
+               description: Limit on the number of rows in the set resulting from the execution of the IN section.
+ type: number + maxRowsToGroupBy: + description: Limits the maximum number of unique keys + received from aggregation function. + type: number + maxRowsToRead: + description: Limits the maximum number of rows that + can be read from a table when running a query. + type: number + maxRowsToSort: + description: Limits the maximum number of rows that + can be read from a table for sorting. + type: number + maxRowsToTransfer: + description: Limits the maximum number of rows that + can be passed to a remote server or saved in a temporary + table when using GLOBAL IN. + type: number + maxTemporaryColumns: + description: Limits the maximum number of temporary + columns that must be kept in RAM at the same time + when running a query, including constant columns. + type: number + maxTemporaryDataOnDiskSizeForQuery: + description: The maximum amount of data consumed by + temporary files on disk in bytes for all concurrently + running queries. Zero means unlimited. + type: number + maxTemporaryDataOnDiskSizeForUser: + description: The maximum amount of data consumed by + temporary files on disk in bytes for all concurrently + running user queries. Zero means unlimited. + type: number + maxTemporaryNonConstColumns: + description: Limits the maximum number of temporary + columns that must be kept in RAM at the same time + when running a query, excluding constant columns. + type: number + maxThreads: + description: The maximum number of query processing + threads, excluding threads for retrieving data from + remote servers. + type: number + memoryOvercommitRatioDenominator: + description: It represents soft memory limit in case + when hard limit is reached on user level. This value + is used to compute overcommit ratio for the query. + Zero means skip the query. + type: number + memoryOvercommitRatioDenominatorForUser: + description: It represents soft memory limit in case + when hard limit is reached on global level. This + value is used to compute overcommit ratio for the + query. Zero means skip the query. + type: number + memoryProfilerSampleProbability: + description: 'Collect random allocations and deallocations + and write them into system.trace_log with ''MemorySample'' + trace_type. The probability is for every alloc/free + regardless to the size of the allocation. Possible + values: from 0 to 1. Default: 0.' + type: number + memoryProfilerStep: + description: 'Memory profiler step (in bytes). If + the next query step requires more memory than this + parameter specifies, the memory profiler collects + the allocating stack trace. Values lower than a + few megabytes slow down query processing. Default + value: 4194304 (4 MB). Zero means disabled memory + profiler.' + type: number + memoryUsageOvercommitMaxWaitMicroseconds: + description: Maximum time thread will wait for memory + to be freed in the case of memory overcommit on + a user level. If the timeout is reached and memory + is not freed, an exception is thrown. + type: number + mergeTreeMaxBytesToUseCache: + description: If ClickHouse should read more than merge_tree_max_bytes_to_use_cache + bytes in one query, it doesn’t use the cache of + uncompressed blocks. + type: number + mergeTreeMaxRowsToUseCache: + description: If ClickHouse should read more than merge_tree_max_rows_to_use_cache + rows in one query, it doesn’t use the cache of uncompressed + blocks. 
+ type: number + mergeTreeMinBytesForConcurrentRead: + description: If the number of bytes to read from one + file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, + then ClickHouse tries to concurrently read from + this file in several threads. + type: number + mergeTreeMinRowsForConcurrentRead: + description: If the number of rows to be read from + a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read + then ClickHouse tries to perform a concurrent reading + from this file on several threads. + type: number + minBytesToUseDirectIo: + description: The minimum data volume required for + using direct I/O access to the storage disk. + type: number + minCountToCompile: + description: How many times to potentially use a compiled + chunk of code before running compilation. + type: number + minCountToCompileExpression: + description: A query waits for expression compilation + process to complete prior to continuing execution. + type: number + minExecutionSpeed: + description: Minimal execution speed in rows per second. + type: number + minExecutionSpeedBytes: + description: Minimal execution speed in bytes per + second. + type: number + minInsertBlockSizeBytes: + description: Sets the minimum number of bytes in the + block which can be inserted into a table by an INSERT + query. + type: number + minInsertBlockSizeRows: + description: Sets the minimum number of rows in the + block which can be inserted into a table by an INSERT + query. + type: number + outputFormatJsonQuote64BitIntegers: + description: If the value is true, integers appear + in quotes when using JSON* Int64 and UInt64 formats + (for compatibility with most JavaScript implementations); + otherwise, integers are output without the quotes. + type: boolean + outputFormatJsonQuoteDenormals: + description: Enables +nan, -nan, +inf, -inf outputs + in JSON output format. + type: boolean + preferLocalhostReplica: + description: 'Enables/disables preferable using the + localhost replica when processing distributed queries. + Default value: true.' + type: boolean + priority: + description: Query priority. + type: number + quotaMode: + description: Quota accounting mode. + type: string + readOverflowMode: + description: 'Sets behaviour on overflow while read. + Possible values:' + type: string + readonly: + description: Restricts permissions for reading data, + write data and change settings queries. + type: number + receiveTimeout: + description: Receive timeout in milliseconds on the + socket used for communicating with the client. + type: number + remoteFilesystemReadMethod: + description: 'Method of reading data from remote filesystem, + one of: read, threadpool.' + type: string + replicationAlterPartitionsSync: + description: For ALTER ... ATTACH|DETACH|DROP queries, + you can use the replication_alter_partitions_sync + setting to set up waiting. + type: number + resultOverflowMode: + description: 'Sets behaviour on overflow in result. + Possible values:' + type: string + selectSequentialConsistency: + description: Enables or disables sequential consistency + for SELECT queries. + type: boolean + sendProgressInHttpHeaders: + description: Enables or disables X-ClickHouse-Progress + HTTP response headers in clickhouse-server responses. + type: boolean + sendTimeout: + description: Send timeout in milliseconds on the socket + used for communicating with the client. + type: number + setOverflowMode: + description: 'Sets behaviour on overflow in the set + resulting. 
Possible values:'
+               type: string
+             skipUnavailableShards:
+               description: Enables or disables silently skipping of unavailable shards.
+               type: boolean
+             sortOverflowMode:
+               description: 'Sets behaviour on overflow while sorting. Possible values:'
+               type: string
+             timeoutBeforeCheckingExecutionSpeed:
+               description: Timeout (in seconds) between checks of execution speed. It is checked that execution speed is not less than that specified in the min_execution_speed parameter. Must be at least 1000.
+               type: number
+             timeoutOverflowMode:
+               description: 'Sets behaviour on overflow. Possible values:'
+               type: string
+             transferOverflowMode:
+               description: 'Sets behaviour on overflow. Possible values:'
+               type: string
+             transformNullIn:
+               description: Enables equality of NULL values for the IN operator.
+               type: boolean
+             useHedgedRequests:
+               description: 'Enables hedged requests logic for remote queries. It allows establishing many connections with different replicas for a query. A new connection is enabled in case the existing connection(s) with the replica(s) were not established within hedged_connection_timeout or no data was received within receive_data_timeout. The query uses the first connection which sends a non-empty progress packet (or a data packet, if allow_changing_replica_until_first_data_packet); other connections are cancelled. Queries with max_parallel_replicas > 1 are supported. Default value: true.'
+               type: boolean
+             useUncompressedCache:
+               description: Whether to use a cache of uncompressed blocks.
+               type: boolean
+             waitForAsyncInsert:
+               description: Enables waiting for processing of asynchronous insertion. If enabled, the server returns OK only after the data is inserted.
+               type: boolean
+             waitForAsyncInsertTimeout:
+               description: The timeout (in seconds) for waiting for processing of asynchronous insertion. Value must be at least 1000 (1 second).
+               type: number
+           type: object
+         type: array
+     type: object
+   type: array
+ version:
+   description: Version of the ClickHouse server software.
+   type: string
+ zookeeper:
+   description: Configuration of the ZooKeeper subcluster. The structure is documented below.
+   items:
+     properties:
+       resources:
+         description: Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
+         items:
+           properties:
+             diskSize:
+               description: Volume of the storage available to a ZooKeeper host, in gigabytes.
+               type: number
+             diskTypeId:
+               description: Type of the storage of ZooKeeper hosts. For more information see the official documentation.
+               type: string
+             resourcePresetId:
+               type: string
+           type: object
+         type: array
+     type: object
+   type: array
+ type: object
+ initProvider:
+   description: |-
+     THIS IS A BETA FIELD. It will be honored
+     unless the Management Policies feature flag is disabled.
+     InitProvider holds the same fields as ForProvider, with the exception
+     of Identifier and other resource reference fields. The fields that are
+     in InitProvider are merged into ForProvider when the resource is created.
+     The same fields are also added to the terraform ignore_changes hook, to
+     avoid updating them after creation. This is useful for fields that are
+     required on creation, but we do not desire to update them after creation,
+     for example because an external controller is managing them, like an
+     autoscaler.
+   properties:
+     access:
+       description: Access policy to the ClickHouse cluster. The structure is documented below.
+       items:
+         properties:
+           dataLens:
+             description: Allow access for DataLens.
Can be either true + or false. + type: boolean + dataTransfer: + description: Allow access for DataTransfer. Can be either + true or false. + type: boolean + metrika: + description: Allow access for Yandex.Metrika. Can be either + true or false. + type: boolean + serverless: + description: Allow access for Serverless. Can be either + true or false. + type: boolean + webSql: + description: Allow access for Web SQL. Can be either true + or false. + type: boolean + yandexQuery: + description: Allow access for YandexQuery. Can be either + true or false. + type: boolean + type: object + type: array + adminPasswordSecretRef: + description: A password used to authorize as user admin when sql_user_management + enabled. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + backupRetainPeriodDays: + description: The period in days during which backups are stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC timezone. + The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + clickhouse: + description: Configuration of the ClickHouse subcluster. The structure + is documented below. + items: + properties: + config: + description: Main ClickHouse cluster configuration. + items: + properties: + asynchronousInsertLogEnabled: + type: boolean + asynchronousInsertLogRetentionSize: + type: number + asynchronousInsertLogRetentionTime: + type: number + asynchronousMetricLogEnabled: + type: boolean + asynchronousMetricLogRetentionSize: + type: number + asynchronousMetricLogRetentionTime: + type: number + backgroundBufferFlushSchedulePoolSize: + type: number + backgroundCommonPoolSize: + type: number + backgroundDistributedSchedulePoolSize: + type: number + backgroundFetchesPoolSize: + type: number + backgroundMergesMutationsConcurrencyRatio: + type: number + backgroundMessageBrokerSchedulePoolSize: + type: number + backgroundMovePoolSize: + type: number + backgroundPoolSize: + type: number + backgroundSchedulePoolSize: + type: number + compression: + description: Data compression configuration. The structure + is documented below. + items: + properties: + level: + description: Compression level for ZSTD method. + type: number + method: + description: 'Method: Compression method. Two + methods are available: LZ4 and zstd.' + type: string + minPartSize: + description: 'Min part size: Minimum size (in + bytes) of a data part in a table. ClickHouse + only applies the rule to tables with data + parts greater than or equal to the Min part + size value.' + type: number + minPartSizeRatio: + description: 'Min part size ratio: Minimum table + part size to total table size ratio. ClickHouse + only applies the rule to tables in which this + ratio is greater than or equal to the Min + part size ratio value.' + type: number + type: object + type: array + defaultDatabase: + description: A database of the ClickHouse cluster. + The structure is documented below. + type: string + dictionariesLazyLoad: + type: boolean + geobaseEnabled: + type: boolean + geobaseUri: + type: string + graphiteRollup: + description: Graphite rollup configuration. The structure + is documented below. 
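+ # Editor's note: the graphiteRollup schema below is easier to follow with a
+ # usage sketch. This is an illustrative assumption, not generated output;
+ # the rule name, regexp, and retention values are made up:
+ #
+ #   graphiteRollup:
+ #     - name: rollup_plain
+ #       pattern:
+ #         - regexp: click_cost
+ #           function: max
+ #           retention:
+ #             - age: 86400
+ #               precision: 60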
+   items:
+     properties:
+       name:
+         description: Graphite rollup configuration name.
+         type: string
+       pathColumnName:
+         description: 'The name of the column storing the metric name (Graphite sensor). Default value: Path.'
+         type: string
+       pattern:
+         description: Set of thinning rules.
+         items:
+           properties:
+             function:
+               description: Aggregation function name.
+               type: string
+             regexp:
+               description: Regular expression that the metric name must match.
+               type: string
+             retention:
+               description: Retain parameters.
+               items:
+                 properties:
+                   age:
+                     description: Minimum data age in seconds.
+                     type: number
+                   precision:
+                     description: Accuracy of determining the age of the data in seconds.
+                     type: number
+                 type: object
+               type: array
+           type: object
+         type: array
+       timeColumnName:
+         description: 'The name of the column storing the time of measuring the metric. Default value: Time.'
+         type: string
+       valueColumnName:
+         description: 'The name of the column storing the value of the metric at the time set in time_column_name. Default value: Value.'
+         type: string
+       versionColumnName:
+         description: 'The name of the column storing the version of the metric. Default value: Timestamp.'
+         type: string
+     type: object
+   type: array
+ kafka:
+   description: Kafka connection configuration. The structure is documented below.
+   items:
+     properties:
+       autoOffsetReset:
+         description: 'Action to take when there is no initial offset in offset store or the desired offset is out of range: ''smallest'',''earliest'' - automatically reset the offset to the smallest offset, ''largest'',''latest'' - automatically reset the offset to the largest offset, ''error'' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking ''message->err''.'
+         type: string
+       debug:
+         description: A comma-separated list of debug contexts to enable.
+         type: string
+       enableSslCertificateVerification:
+         description: Enable verification of SSL certificates.
+         type: boolean
+       maxPollIntervalMs:
+         description: Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+         type: number
+       saslMechanism:
+         description: SASL mechanism used in kafka authentication.
+         type: string
+       saslPasswordSecretRef:
+         description: User password on kafka server.
+         properties:
+           key:
+             description: The key to select.
+             type: string
+           name:
+             description: Name of the secret.
+             type: string
+           namespace:
+             description: Namespace of the secret.
+             type: string
+         required:
+         - key
+         - name
+         - namespace
+         type: object
+       saslUsername:
+         description: Username on kafka server.
+         type: string
+       securityProtocol:
+         description: Security protocol used to connect to kafka server.
+         type: string
+       sessionTimeoutMs:
+         description: Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+         type: number
+     type: object
+   type: array
+ kafkaTopic:
+   description: Kafka topic connection configuration. The structure is documented below.
+   items:
+     properties:
+       name:
+         description: The name of the Kafka topic.
+         type: string
+       settings:
+         description: Kafka connection settings for the topic. The list is documented below.
+         items:
+           properties:
+             autoOffsetReset:
+               description: 'Action to take when there is no initial offset in offset store or the desired offset is out of range: ''smallest'',''earliest'' - automatically reset the offset to the smallest offset, ''largest'',''latest'' - automatically reset the offset to the largest offset, ''error'' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking ''message->err''.'
+               type: string
+             debug:
+               description: A comma-separated list of debug contexts to enable.
+               type: string
+             enableSslCertificateVerification:
+               description: Enable verification of SSL certificates.
+               type: boolean
+             maxPollIntervalMs:
+               description: Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member.
+               type: number
+             saslMechanism:
+               description: SASL mechanism used in kafka authentication.
+               type: string
+             saslPasswordSecretRef:
+               description: User password on kafka server.
+               properties:
+                 key:
+                   description: The key to select.
+                   type: string
+                 name:
+                   description: Name of the secret.
+                   type: string
+                 namespace:
+                   description: Namespace of the secret.
+                   type: string
+               required:
+               - key
+               - name
+               - namespace
+               type: object
+             saslUsername:
+               description: Username on kafka server.
+               type: string
+             securityProtocol:
+               description: Security protocol used to connect to kafka server.
+               type: string
+             sessionTimeoutMs:
+               description: Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance.
+               type: number
+           type: object
+         type: array
+     type: object
+   type: array
+ keepAliveTimeout:
+   type: number
+ logLevel:
+   description: ClickHouse server parameters. For more information, see the official documentation.
+   type: string
+ markCacheSize:
+   type: number
+ maxConcurrentQueries:
+   type: number
+ maxConnections:
+   type: number
+ maxPartitionSizeToDrop:
+   type: number
+ maxTableSizeToDrop:
+   type: number
+ mergeTree:
+   description: MergeTree engine configuration. The structure is documented below.
+   items:
+     properties:
+       allowRemoteFsZeroCopyReplication:
+         description: When this setting has a value greater than zero only a single replica starts the merge immediately if the merged part is on shared storage and allow_remote_fs_zero_copy_replication is enabled.
+         type: boolean
+       checkSampleColumnIsCorrect:
+         description: 'Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default value: true.'
+         type: boolean
+       cleanupDelayPeriod:
+         description: Minimum period to clean old queue logs, blocks hashes and parts.
+         type: number
+       inactivePartsToDelayInsert:
+         description: If the number of inactive parts in a single partition in the table exceeds the inactive_parts_to_delay_insert value, an INSERT is artificially slowed down.
It is useful when a server fails to clean up parts quickly enough.
+         type: number
+       inactivePartsToThrowInsert:
+         description: If the number of inactive parts in a single partition is more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
+         type: number
+       maxAvgPartSizeForTooManyParts:
+         description: The too many parts check according to parts_to_delay_insert and parts_to_throw_insert will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed nor rejected. This allows having hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
+         type: number
+       maxBytesToMergeAtMaxSpaceInPool:
+         description: The maximum total parts size (in bytes) to be merged into one part, if there are enough resources available. max_bytes_to_merge_at_max_space_in_pool roughly corresponds to the maximum possible part size created by an automatic background merge.
+         type: number
+       maxBytesToMergeAtMinSpaceInPool:
+         description: 'Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.'
+         type: number
+       maxCleanupDelayPeriod:
+         description: 'Maximum period to clean old queue logs, blocks hashes and parts. Default value: 300 seconds.'
+         type: number
+       maxMergeSelectingSleepMs:
+         description: 'Maximum sleep time for merge selecting; a lower setting will trigger selecting tasks in background_schedule_pool frequently, which results in a large number of requests to zookeeper in large-scale clusters. Default value: 60000 milliseconds (60 seconds).'
+         type: number
+       maxNumberOfMergesWithTtlInPool:
+         description: When there is more than the specified number of merges with TTL entries in the pool, do not assign new merges with TTL.
+         type: number
+       maxPartsInTotal:
+         description: Maximum number of parts in all partitions.
+         type: number
+       maxReplicatedMergesInQueue:
+         description: 'Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.'
+         type: number
+       mergeMaxBlockSize:
+         description: 'The number of rows that are read from the merged parts into memory. Default value: 8192.'
+         type: number
+       mergeSelectingSleepMs:
+         description: Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+         type: number
+       mergeWithRecompressionTtlTimeout:
+         description: 'Minimum delay in seconds before repeating a merge with recompression TTL. Default value: 14400 seconds (4 hours).'
+         type: number
+       mergeWithTtlTimeout:
+         description: 'Minimum delay in seconds before repeating a merge with delete TTL. Default value: 14400 seconds (4 hours).'
+         type: number
+       minAgeToForceMergeOnPartitionOnly:
+         description: Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on a subset.
+         type: boolean
+       minAgeToForceMergeSeconds:
+         description: Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
+         type: number
+       minBytesForWidePart:
+         description: Minimum number of bytes in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+         type: number
+       minRowsForWidePart:
+         description: Minimum number of rows in a data part that can be stored in Wide format. You can set one, both or none of these settings.
+         type: number
+       numberOfFreeEntriesInPoolToExecuteMutation:
+         description: 'When there are fewer than the specified number of free entries in the pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts". Default value: 20.'
+         type: number
+       numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge:
+         description: 'Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.'
+         type: number
+       partsToDelayInsert:
+         description: 'Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.'
+         type: number
+       partsToThrowInsert:
+         description: 'Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the ''Too many parts ...'' exception.'
+         type: number
+       replicatedDeduplicationWindow:
+         description: 'Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).'
+         type: number
+       replicatedDeduplicationWindowSeconds:
+         description: 'Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).'
+         type: number
+       ttlOnlyDropParts:
+         description: Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables.
+         type: boolean
+     type: object
+   type: array
+ metricLogEnabled:
+   type: boolean
+ metricLogRetentionSize:
+   type: number
+ metricLogRetentionTime:
+   type: number
+ opentelemetrySpanLogEnabled:
+   type: boolean
+ opentelemetrySpanLogRetentionSize:
+   type: number
+ opentelemetrySpanLogRetentionTime:
+   type: number
+ partLogRetentionSize:
+   type: number
+ partLogRetentionTime:
+   type: number
+ queryCache:
+   description: Query cache configuration. The structure is documented below.
+   items:
+     properties:
+       maxEntries:
+         description: 'The maximum number of SELECT query results stored in the cache. Default value: 1024.'
+         type: number
+       maxEntrySizeInBytes:
+         description: 'The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: 1048576 (1 MiB).'
+         type: number
+       maxEntrySizeInRows:
+         description: 'The maximum number of rows SELECT query results may have to be saved in the cache. Default value: 30000000 (30 million).'
+         type: number
+       maxSizeInBytes:
+         description: 'The maximum cache size in bytes. 0 means the query cache is disabled. Default value: 1073741824 (1 GiB).'
+         type: number
+     type: object
+   type: array
+ queryLogRetentionSize:
+   type: number
+ queryLogRetentionTime:
+   type: number
+ queryMaskingRules:
+   description: Query masking rules configuration. The structure is documented below.
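+ # Editor's note: a minimal sketch of a queryMaskingRules entry matching the
+ # schema below; the rule name, expression, and replacement are illustrative
+ # assumptions, not generated output:
+ #
+ #   queryMaskingRules:
+ #     - name: hide-card-numbers
+ #       regexp: \d{4}-\d{4}-\d{4}-\d{4}
+ #       replace: '****'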
+ items:
+ properties:
+ name:
+ description: The name of the masking rule.
+ type: string
+ regexp:
+ description: RE2-compatible regular expression
+ that the sensitive data must match.
+ type: string
+ replace:
+ description: 'Substitution string for sensitive
+ data. Default value: six asterisks.'
+ type: string
+ type: object
+ type: array
+ queryThreadLogEnabled:
+ type: boolean
+ queryThreadLogRetentionSize:
+ type: number
+ queryThreadLogRetentionTime:
+ type: number
+ queryViewsLogEnabled:
+ type: boolean
+ queryViewsLogRetentionSize:
+ type: number
+ queryViewsLogRetentionTime:
+ type: number
+ rabbitmq:
+ description: RabbitMQ connection configuration. The
+ structure is documented below.
+ items:
+ properties:
+ passwordSecretRef:
+ description: The password of the user.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - key
+ - name
+ - namespace
+ type: object
+ username:
+ description: RabbitMQ username.
+ type: string
+ vhost:
+ description: 'RabbitMQ vhost. Default value: /.'
+ type: string
+ type: object
+ type: array
+ sessionLogEnabled:
+ type: boolean
+ sessionLogRetentionSize:
+ type: number
+ sessionLogRetentionTime:
+ type: number
+ textLogEnabled:
+ type: boolean
+ textLogLevel:
+ type: string
+ textLogRetentionSize:
+ type: number
+ textLogRetentionTime:
+ type: number
+ timezone:
+ type: string
+ totalMemoryProfilerStep:
+ type: number
+ traceLogEnabled:
+ type: boolean
+ traceLogRetentionSize:
+ type: number
+ traceLogRetentionTime:
+ type: number
+ uncompressedCacheSize:
+ type: number
+ zookeeperLogEnabled:
+ type: boolean
+ zookeeperLogRetentionSize:
+ type: number
+ zookeeperLogRetentionTime:
+ type: number
+ type: object
+ type: array
+ resources:
+ description: Resources allocated to hosts of the ClickHouse
+ subcluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a
+ ClickHouse host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of ClickHouse hosts.
+ For more information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ cloudStorage:
+ description: Cloud storage configuration. The structure is
+ documented below.
+ items:
+ properties:
+ dataCacheEnabled:
+ description: Enables temporary storage, in the cluster
+ storage, of data requested from the object storage.
+ type: boolean
+ dataCacheMaxSize:
+ description: Defines the maximum amount of memory (in bytes)
+ allocated in the cluster storage for temporary storage
+ of data requested from the object storage.
+ type: number
+ enabled:
+ description: Whether to use Yandex Object Storage for storing
+ ClickHouse data. Can be either true or false.
+ type: boolean
+ moveFactor:
+ description: Sets the minimum free space ratio in the cluster
+ storage. If the free space is lower than this value, the
+ data is transferred to Yandex Object Storage. Acceptable
+ values are 0 to 1, inclusive.
+ type: number
+ preferNotToMerge:
+ description: Disables merging of data parts in Yandex Object
+ Storage.
+ type: boolean
+ type: object
+ type: array
+ clusterId:
+ type: string
+ copySchemaOnNewHosts:
+ description: Whether to copy schema on new ClickHouse hosts.
+ type: boolean
+ database:
+ description: A database of the ClickHouse cluster. The structure
+ is documented below.
+ items:
+ properties:
+ name:
+ description: The name of the database.
+ type: string
+ type: object
+ type: array
+ deletionProtection:
+ description: Inhibits deletion of the cluster. Can be either true
+ or false.
+ type: boolean
+ description:
+ description: Description of the ClickHouse cluster.
+ type: string
+ embeddedKeeper:
+ description: Whether to use ClickHouse Keeper as a coordination
+ system and place it on the same hosts as ClickHouse. If not,
+ ZooKeeper is used instead, placed on separate hosts.
+ type: boolean
+ environment:
+ description: Deployment environment of the ClickHouse cluster.
+ Can be either PRESTABLE or PRODUCTION.
+ type: string
+ folderId:
+ description: The ID of the folder that the resource belongs to.
+ If it is not provided, the default provider folder is used.
+ type: string
+ folderIdRef:
+ description: Reference to a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ folderIdSelector:
+ description: Selector for a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ formatSchema:
+ description: A set of protobuf or capnproto format schemas. The
+ structure is documented below.
+ items:
+ properties:
+ name:
+ description: The name of the format schema.
+ type: string
+ type:
+ description: Type of the format schema.
+ type: string
+ uri:
+ description: Format schema file URL. You can only use format
+ schemas stored in Yandex Object Storage.
+ type: string
+ type: object
+ type: array
+ host:
+ description: A host of the ClickHouse cluster. The structure is
+ documented below.
+ items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Can be either true or false. + type: boolean + shardName: + description: The name of the shard to which the host belongs. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of the host to be deployed. Can be + either CLICKHOUSE or ZOOKEEPER. + type: string + zone: + description: The availability zone where the ClickHouse + host will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the ClickHouse + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. 
+ type: string + type: object + type: array + mlModel: + description: A group of machine learning models. The structure + is documented below + items: + properties: + name: + description: The name of the ml model. + type: string + type: + description: Type of the model. + type: string + uri: + description: Model file URL. You can only use models stored + in Yandex Object Storage. + type: string + type: object + type: array + name: + description: Name of the ClickHouse cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the ClickHouse cluster + belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountId: + description: ID of the service account used for access to Yandex + Object Storage. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ shard:
+ items:
+ properties:
+ name:
+ description: The name of the shard.
+ type: string
+ resources:
+ description: Resources allocated to hosts of the shard. The
+ resources specified for the shard take precedence over
+ the resources specified for the cluster. The structure
+ is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a
+ ClickHouse host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of ClickHouse hosts.
+ For more information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ weight:
+ description: The weight of the shard.
+ type: number
+ type: object
+ type: array
+ shardGroup:
+ description: A group of ClickHouse shards. The structure is documented
+ below.
+ items:
+ properties:
+ description:
+ description: Description of the shard group.
+ type: string
+ name:
+ description: The name of the shard group, used as cluster
+ name in Distributed tables.
+ type: string
+ shardNames:
+ description: List of shard names that belong to the shard
+ group.
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ sqlDatabaseManagement:
+ description: Grants admin user database management permission.
+ type: boolean
+ sqlUserManagement:
+ description: Enables admin user with user management permission.
+ type: boolean
+ user:
+ description: A user of the ClickHouse cluster. The structure is
+ documented below.
+ items:
+ properties:
+ name:
+ description: The name of the user.
+ type: string
+ passwordSecretRef:
+ description: The password of the user.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - key
+ - name
+ - namespace
+ type: object
+ permission:
+ description: Set of permissions granted to the user. The
+ structure is documented below.
+ items:
+ properties:
+ databaseName:
+ description: The name of the database that the permission
+ grants access to.
+ type: string
+ type: object
+ type: array
+ quota:
+ description: Set of user quotas. The structure is documented
+ below.
+ items:
+ properties:
+ errors:
+ description: The number of queries that threw an exception.
+ type: number
+ executionTime:
+ description: The total query execution time, in milliseconds
+ (wall time).
+ type: number
+ intervalDuration:
+ description: Duration of interval for quota in milliseconds.
+ type: number
+ queries:
+ description: The total number of queries.
+ type: number
+ readRows:
+ description: The total number of source rows read
+ from tables for running the query, on all remote
+ servers.
+ type: number
+ resultRows:
+ description: The total number of rows given as the
+ result.
+ type: number
+ type: object
+ type: array
+ settings:
+ description: Custom settings for the user.
The list is documented
+ below.
+ items:
+ properties:
+ addHttpCorsHeader:
+ description: Include CORS headers in HTTP responses.
+ type: boolean
+ allowDdl:
+ description: Allows or denies DDL queries.
+ type: boolean
+ allowIntrospectionFunctions:
+ description: Enables introspection functions for
+ query profiling.
+ type: boolean
+ allowSuspiciousLowCardinalityTypes:
+ description: Allows specifying LowCardinality modifier
+ for types of small fixed size (8 bytes or less) in CREATE
+ TABLE statements. Enabling this may increase merge
+ times and memory consumption.
+ type: boolean
+ anyJoinDistinctRightTableKeys:
+ description: Enables legacy ClickHouse server behaviour
+ in ANY INNER|LEFT JOIN operations.
+ type: boolean
+ asyncInsert:
+ description: Enables asynchronous inserts. Disabled
+ by default.
+ type: boolean
+ asyncInsertBusyTimeout:
+ description: 'The maximum timeout in milliseconds
+ since the first INSERT query before inserting collected
+ data. If the parameter is set to 0, the timeout
+ is disabled. Default value: 200.'
+ type: number
+ asyncInsertMaxDataSize:
+ description: 'The maximum size of the unparsed data
+ in bytes collected per query before being inserted.
+ If the parameter is set to 0, asynchronous insertions
+ are disabled. Default value: 100000.'
+ type: number
+ asyncInsertStaleTimeout:
+ description: The maximum timeout in milliseconds since
+ the last INSERT query before dumping collected data.
+ If enabled, the setting prolongs the async_insert_busy_timeout
+ with every INSERT query as long as async_insert_max_data_size
+ is not exceeded.
+ type: number
+ asyncInsertThreads:
+ description: 'The maximum number of threads for background
+ data parsing and insertion. If the parameter is
+ set to 0, asynchronous insertions are disabled.
+ Default value: 16.'
+ type: number
+ cancelHttpReadonlyQueriesOnClientClose:
+ description: 'Cancels HTTP read-only queries (e.g.
+ SELECT) when a client closes the connection without
+ waiting for the response. Default value: false.'
+ type: boolean
+ compile:
+ description: Enable compilation of queries.
+ type: boolean
+ compileExpressions:
+ description: Turn on expression compilation.
+ type: boolean
+ connectTimeout:
+ description: Connect timeout in milliseconds on the
+ socket used for communicating with the client.
+ type: number
+ connectTimeoutWithFailover:
+ description: 'The timeout in milliseconds for connecting
+ to a remote server for a Distributed table engine,
+ if the ‘shard’ and ‘replica’ sections are used in
+ the cluster definition. If unsuccessful, several
+ attempts are made to connect to various replicas.
+ Default value: 50.'
+ type: number
+ countDistinctImplementation:
+ description: Specifies which of the uniq* functions
+ should be used to perform the COUNT(DISTINCT …)
+ construction.
+ type: string
+ dateTimeInputFormat:
+ description: 'Allows choosing a parser of the text
+ representation of date and time, one of: best_effort,
+ basic, best_effort_us. Default value: basic. Cloud
+ default value: best_effort.'
+ type: string
+ dateTimeOutputFormat:
+ description: 'Allows choosing different output formats
+ of the text representation of date and time, one
+ of: simple, iso, unix_timestamp. Default value:
+ simple.'
+ type: string
+ deduplicateBlocksInDependentMaterializedViews:
+ description: Enables or disables the deduplication
+ check for materialized views that receive data from
+ Replicated* tables.
+ type: boolean
+ distinctOverflowMode:
+ description: 'Sets behaviour on overflow when using
+ DISTINCT. Possible values:'
+ type: string
+ distributedAggregationMemoryEfficient:
+ description: Determine the behavior of distributed
+ subqueries.
+ type: boolean
+ distributedDdlTaskTimeout:
+ description: Timeout for DDL queries, in milliseconds.
+ type: number
+ distributedProductMode:
+ description: Changes the behaviour of distributed
+ subqueries.
+ type: string
+ emptyResultForAggregationByEmptySet:
+ description: Allows returning an empty result.
+ type: boolean
+ enableHttpCompression:
+ description: Enables or disables data compression
+ in the response to an HTTP request.
+ type: boolean
+ fallbackToStaleReplicasForDistributedQueries:
+ description: Forces a query to an out-of-date replica
+ if updated data is not available.
+ type: boolean
+ flattenNested:
+ description: Sets the data format of nested columns.
+ type: boolean
+ forceIndexByDate:
+ description: Disables query execution if the index
+ can’t be used by date.
+ type: boolean
+ forcePrimaryKey:
+ description: Disables query execution if indexing
+ by the primary key is not possible.
+ type: boolean
+ formatRegexp:
+ description: Regular expression (for Regexp format).
+ type: string
+ formatRegexpSkipUnmatched:
+ description: Skip lines unmatched by regular expression.
+ type: boolean
+ groupByOverflowMode:
+ description: 'Sets behaviour on overflow while GROUP
+ BY operation. Possible values:'
+ type: string
+ groupByTwoLevelThreshold:
+ description: Sets the threshold of the number of keys,
+ after which the two-level aggregation should be used.
+ type: number
+ groupByTwoLevelThresholdBytes:
+ description: Sets the threshold of the number of bytes,
+ after which the two-level aggregation should be used.
+ type: number
+ hedgedConnectionTimeoutMs:
+ description: 'Connection timeout for establishing
+ connection with replica for Hedged requests. Default
+ value: 50 milliseconds.'
+ type: number
+ httpConnectionTimeout:
+ description: Timeout for HTTP connection in milliseconds.
+ type: number
+ httpHeadersProgressInterval:
+ description: Sets minimal interval between notifications
+ about request process in HTTP header X-ClickHouse-Progress.
+ type: number
+ httpReceiveTimeout:
+ description: Timeout for HTTP connection in milliseconds.
+ type: number
+ httpSendTimeout:
+ description: Timeout for HTTP connection in milliseconds.
+ type: number
+ idleConnectionTimeout:
+ description: 'Timeout to close idle TCP connections
+ after specified number of seconds. Default value:
+ 3600 seconds.'
+ type: number
+ inputFormatDefaultsForOmittedFields:
+ description: When performing INSERT queries, replace
+ omitted input column values with default values
+ of the respective columns.
+ type: boolean
+ inputFormatImportNestedJson:
+ description: Enables or disables the insertion of
+ JSON data with nested objects.
+ type: boolean
+ inputFormatNullAsDefault:
+ description: Enables or disables the initialization
+ of NULL fields with default values, if data type
+ of these fields is not nullable.
+ type: boolean
+ inputFormatParallelParsing:
+ description: Enables or disables order-preserving
+ parallel parsing of data formats. Supported only
+ for TSV, TSKV, CSV and JSONEachRow formats.
+ type: boolean
+ inputFormatValuesInterpretExpressions:
+ description: Enables or disables the full SQL parser
+ if the fast stream parser can’t parse the data.
+ type: boolean
+ inputFormatWithNamesUseHeader:
+ description: Enables or disables checking the column
+ order when inserting data.
+ type: boolean
+ insertKeeperMaxRetries:
+ description: Sets the maximum number of
+ retries for ClickHouse Keeper (or ZooKeeper) requests
+ during insert into replicated MergeTree. Only Keeper
+ requests which failed due to network error, Keeper
+ session timeout, or request timeout are considered
+ for retries.
+ type: number
+ insertNullAsDefault:
+ description: 'Enables the insertion of default values
+ instead of NULL into columns with not nullable data
+ type. Default value: true.'
+ type: boolean
+ insertQuorum:
+ description: Enables the quorum writes.
+ type: number
+ insertQuorumParallel:
+ description: Enables or disables parallelism for quorum
+ INSERT queries.
+ type: boolean
+ insertQuorumTimeout:
+ description: Write to a quorum timeout in milliseconds.
+ type: number
+ joinAlgorithm:
+ description: 'Specifies which JOIN algorithm is used.
+ Possible values:'
+ items:
+ type: string
+ type: array
+ joinOverflowMode:
+ description: 'Sets behaviour on overflow in JOIN.
+ Possible values:'
+ type: string
+ joinUseNulls:
+ description: Sets the type of JOIN behaviour. When
+ merging tables, empty cells may appear. ClickHouse
+ fills them differently based on this setting.
+ type: boolean
+ joinedSubqueryRequiresAlias:
+ description: Require aliases for subselects and table
+ functions in FROM clauses where more than one table
+ is present.
+ type: boolean
+ loadBalancing:
+ description: 'Specifies the algorithm of replicas
+ selection that is used for distributed query processing,
+ one of: random, nearest_hostname, in_order, first_or_random,
+ round_robin. Default value: random.'
+ type: string
+ localFilesystemReadMethod:
+ description: 'Method of reading data from local filesystem.
+ Possible values:'
+ type: string
+ logQueryThreads:
+ description: 'Setting up query threads logging. Query
+ threads log into the system.query_thread_log table.
+ This setting has effect only when log_queries is
+ true. Queries’ threads run by ClickHouse with this
+ setup are logged according to the rules in the query_thread_log
+ server configuration parameter. Default value: true.'
+ type: boolean
+ lowCardinalityAllowInNativeFormat:
+ description: Allows or restricts using the LowCardinality
+ data type with the Native format.
+ type: boolean
+ maxAstDepth:
+ description: Maximum abstract syntax tree depth.
+ type: number
+ maxAstElements:
+ description: Maximum abstract syntax tree elements.
+ type: number
+ maxBlockSize:
+ description: A recommendation for what size of the
+ block (in a count of rows) to load from tables.
+ type: number
+ maxBytesBeforeExternalGroupBy:
+ description: Limit in bytes for using memory for GROUP
+ BY before using swap on disk.
+ type: number
+ maxBytesBeforeExternalSort:
+ description: This setting is equivalent to the max_bytes_before_external_group_by
+ setting, except that it applies to the sort operation
+ (ORDER BY), not aggregation.
+ type: number
+ maxBytesInDistinct:
+ description: Limits the maximum size of a hash table
+ in bytes (uncompressed data) when using DISTINCT.
+ type: number
+ maxBytesInJoin:
+ description: Limit on maximum size of the hash table
+ for JOIN, in bytes.
+ type: number
+ maxBytesInSet:
+ description: Limit on the number of bytes in the set
+ resulting from the execution of the IN section.
+ type: number
+ maxBytesToRead:
+ description: Limits the maximum number of bytes (uncompressed
+ data) that can be read from a table when running
+ a query.
+ type: number
+ maxBytesToSort:
+ description: Limits the maximum number of bytes (uncompressed
+ data) that can be read from a table for sorting.
+ type: number
+ maxBytesToTransfer:
+ description: Limits the maximum number of bytes (uncompressed
+ data) that can be passed to a remote server or saved
+ in a temporary table when using GLOBAL IN.
+ type: number
+ maxColumnsToRead:
+ description: Limits the maximum number of columns
+ that can be read from a table in a single query.
+ type: number
+ maxConcurrentQueriesForUser:
+ description: 'The maximum number of concurrent requests
+ per user. Default value: 0 (no limit).'
+ type: number
+ maxExecutionTime:
+ description: Limits the maximum query execution time
+ in milliseconds.
+ type: number
+ maxExpandedAstElements:
+ description: Maximum abstract syntax tree elements
+ after expansion of aliases.
+ type: number
+ maxFinalThreads:
+ description: Sets the maximum number of parallel threads
+ for the SELECT query data read phase with the FINAL
+ modifier.
+ type: number
+ maxHttpGetRedirects:
+ description: Limits the maximum number of HTTP GET
+ redirect hops for URL-engine tables.
+ type: number
+ maxInsertBlockSize:
+ description: The size of blocks (in a count of rows)
+ to form for insertion into a table.
+ type: number
+ maxInsertThreads:
+ description: 'The maximum number of threads to execute
+ the INSERT SELECT query. Default value: 0.'
+ type: number
+ maxMemoryUsage:
+ description: Limits the maximum memory usage (in bytes)
+ for processing queries on a single server.
+ type: number
+ maxMemoryUsageForUser:
+ description: Limits the maximum memory usage (in bytes)
+ for processing a user's queries on a single server.
+ type: number
+ maxNetworkBandwidth:
+ description: Limits the speed of the data exchange
+ over the network in bytes per second.
+ type: number
+ maxNetworkBandwidthForUser:
+ description: Limits the speed of the data exchange
+ over the network in bytes per second, for all concurrently
+ running queries of a single user.
+ type: number
+ maxParserDepth:
+ description: Limits maximum recursion depth in the
+ recursive descent parser. Allows controlling the
+ stack size. Zero means unlimited.
+ type: number
+ maxQuerySize:
+ description: The maximum part of a query that can
+ be taken to RAM for parsing with the SQL parser.
+ type: number
+ maxReadBufferSize:
+ description: The maximum size of the buffer to read
+ from the filesystem.
+ type: number
+ maxReplicaDelayForDistributedQueries:
+ description: Disables lagging replicas for distributed
+ queries.
+ type: number
+ maxResultBytes:
+ description: Limits the number of bytes in the result.
+ type: number
+ maxResultRows:
+ description: Limits the number of rows in the result.
+ type: number
+ maxRowsInDistinct:
+ description: Limits the maximum number of different
+ rows when using DISTINCT.
+ type: number
+ maxRowsInJoin:
+ description: Limit on maximum size of the hash table
+ for JOIN, in rows.
+ type: number
+ maxRowsInSet:
+ description: Limit on the number of rows in the set
+ resulting from the execution of the IN section.
+ type: number
+ maxRowsToGroupBy:
+ description: Limits the maximum number of unique keys
+ received from aggregation function.
+ type: number
+ maxRowsToRead:
+ description: Limits the maximum number of rows that
+ can be read from a table when running a query.
+ type: number
+ maxRowsToSort:
+ description: Limits the maximum number of rows that
+ can be read from a table for sorting.
+ type: number
+ maxRowsToTransfer:
+ description: Limits the maximum number of rows that
+ can be passed to a remote server or saved in a temporary
+ table when using GLOBAL IN.
+ type: number
+ maxTemporaryColumns:
+ description: Limits the maximum number of temporary
+ columns that must be kept in RAM at the same time
+ when running a query, including constant columns.
+ type: number
+ maxTemporaryDataOnDiskSizeForQuery:
+ description: The maximum amount of data consumed by
+ temporary files on disk in bytes for all concurrently
+ running queries. Zero means unlimited.
+ type: number
+ maxTemporaryDataOnDiskSizeForUser:
+ description: The maximum amount of data consumed by
+ temporary files on disk in bytes for all concurrently
+ running user queries. Zero means unlimited.
+ type: number
+ maxTemporaryNonConstColumns:
+ description: Limits the maximum number of temporary
+ columns that must be kept in RAM at the same time
+ when running a query, excluding constant columns.
+ type: number
+ maxThreads:
+ description: The maximum number of query processing
+ threads, excluding threads for retrieving data from
+ remote servers.
+ type: number
+ memoryOvercommitRatioDenominator:
+ description: Represents the soft memory limit when
+ the hard limit is reached at the user level. This
+ value is used to compute the overcommit ratio for
+ the query. Zero means skip the query.
+ type: number
+ memoryOvercommitRatioDenominatorForUser:
+ description: Represents the soft memory limit when
+ the hard limit is reached at the global level. This
+ value is used to compute the overcommit ratio for
+ the query. Zero means skip the query.
+ type: number
+ memoryProfilerSampleProbability:
+ description: 'Collect random allocations and deallocations
+ and write them into system.trace_log with ''MemorySample''
+ trace_type. The probability is for every alloc/free
+ regardless of the size of the allocation. Possible
+ values: from 0 to 1. Default: 0.'
+ type: number
+ memoryProfilerStep:
+ description: 'Memory profiler step (in bytes). If
+ the next query step requires more memory than this
+ parameter specifies, the memory profiler collects
+ the allocating stack trace. Values lower than a
+ few megabytes slow down query processing. Default
+ value: 4194304 (4 MB). Zero means disabled memory
+ profiler.'
+ type: number
+ memoryUsageOvercommitMaxWaitMicroseconds:
+ description: Maximum time a thread will wait for memory
+ to be freed in the case of memory overcommit on
+ a user level. If the timeout is reached and memory
+ is not freed, an exception is thrown.
+ type: number
+ mergeTreeMaxBytesToUseCache:
+ description: If ClickHouse should read more than merge_tree_max_bytes_to_use_cache
+ bytes in one query, it doesn’t use the cache of
+ uncompressed blocks.
+ type: number
+ mergeTreeMaxRowsToUseCache:
+ description: If ClickHouse should read more than merge_tree_max_rows_to_use_cache
+ rows in one query, it doesn’t use the cache of uncompressed
+ blocks.
+ type: number
+ mergeTreeMinBytesForConcurrentRead:
+ description: If the number of bytes to read from one
+ file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read,
+ then ClickHouse tries to concurrently read from
+ this file in several threads.
+ type: number
+ mergeTreeMinRowsForConcurrentRead:
+ description: If the number of rows to be read from
+ a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read,
+ then ClickHouse tries to concurrently read from
+ this file in several threads.
+ type: number
+ minBytesToUseDirectIo:
+ description: The minimum data volume required for
+ using direct I/O access to the storage disk.
+ type: number
+ minCountToCompile:
+ description: How many times to potentially use a compiled
+ chunk of code before running compilation.
+ type: number
+ minCountToCompileExpression:
+ description: A query waits for the expression compilation
+ process to complete prior to continuing execution.
+ type: number
+ minExecutionSpeed:
+ description: Minimal execution speed in rows per second.
+ type: number
+ minExecutionSpeedBytes:
+ description: Minimal execution speed in bytes per
+ second.
+ type: number
+ minInsertBlockSizeBytes:
+ description: Sets the minimum number of bytes in the
+ block which can be inserted into a table by an INSERT
+ query.
+ type: number
+ minInsertBlockSizeRows:
+ description: Sets the minimum number of rows in the
+ block which can be inserted into a table by an INSERT
+ query.
+ type: number
+ outputFormatJsonQuote64BitIntegers:
+ description: If the value is true, integers appear
+ in quotes when using JSON* Int64 and UInt64 formats
+ (for compatibility with most JavaScript implementations);
+ otherwise, integers are output without the quotes.
+ type: boolean
+ outputFormatJsonQuoteDenormals:
+ description: Enables +nan, -nan, +inf, -inf outputs
+ in JSON output format.
+ type: boolean
+ preferLocalhostReplica:
+ description: 'Enables/disables preferable using the
+ localhost replica when processing distributed queries.
+ Default value: true.'
+ type: boolean
+ priority:
+ description: Query priority.
+ type: number
+ quotaMode:
+ description: Quota accounting mode.
+ type: string
+ readOverflowMode:
+ description: 'Sets behaviour on overflow while reading.
+ Possible values:'
+ type: string
+ readonly:
+ description: Restricts permissions for read data,
+ write data, and change settings queries.
+ type: number
+ receiveTimeout:
+ description: Receive timeout in milliseconds on the
+ socket used for communicating with the client.
+ type: number
+ remoteFilesystemReadMethod:
+ description: 'Method of reading data from remote filesystem,
+ one of: read, threadpool.'
+ type: string
+ replicationAlterPartitionsSync:
+ description: For ALTER ... ATTACH|DETACH|DROP queries,
+ you can use the replication_alter_partitions_sync
+ setting to set up waiting.
+ type: number
+ resultOverflowMode:
+ description: 'Sets behaviour on overflow in result.
+ Possible values:'
+ type: string
+ selectSequentialConsistency:
+ description: Enables or disables sequential consistency
+ for SELECT queries.
+ type: boolean
+ sendProgressInHttpHeaders:
+ description: Enables or disables X-ClickHouse-Progress
+ HTTP response headers in clickhouse-server responses.
+ type: boolean
+ sendTimeout:
+ description: Send timeout in milliseconds on the socket
+ used for communicating with the client.
+ type: number
+ setOverflowMode:
+ description: 'Sets behaviour on overflow in the resulting
+ set. Possible values:'
+ type: string
+ skipUnavailableShards:
+ description: Enables or disables silently skipping
+ of unavailable shards.
+ type: boolean
+ sortOverflowMode:
+ description: 'Sets behaviour on overflow while sorting.
+ Possible values:'
+ type: string
+ timeoutBeforeCheckingExecutionSpeed:
+ description: Timeout (in seconds) between checks of
+ execution speed. It is checked that execution speed
+ is not less than the value specified in the min_execution_speed
+ parameter. Must be at least 1000.
+ type: number
+ timeoutOverflowMode:
+ description: 'Sets behaviour on overflow. Possible
+ values:'
+ type: string
+ transferOverflowMode:
+ description: 'Sets behaviour on overflow. Possible
+ values:'
+ type: string
+ transformNullIn:
+ description: Enables equality of NULL values for IN
+ operator.
+ type: boolean
+ useHedgedRequests:
+ description: 'Enables hedged requests logic for remote
+ queries. It allows establishing many connections
+ with different replicas for a query. A new connection
+ is enabled if the existing connection(s) with replica(s)
+ were not established within hedged_connection_timeout
+ or no data was received within receive_data_timeout.
+ The query uses the first connection that sends a non-empty
+ progress packet (or a data packet, if allow_changing_replica_until_first_data_packet);
+ other connections are cancelled. Queries with max_parallel_replicas
+ > 1 are supported. Default value: true.'
+ type: boolean
+ useUncompressedCache:
+ description: Whether to use a cache of uncompressed
+ blocks.
+ type: boolean
+ waitForAsyncInsert:
+ description: Enables waiting for processing of asynchronous
+ insertion. If enabled, the server returns OK only after
+ the data is inserted.
+ type: boolean
+ waitForAsyncInsertTimeout:
+ description: The timeout (in milliseconds) for waiting
+ for processing of asynchronous insertion. Value
+ must be at least 1000 (1 second).
+ type: number
+ type: object
+ type: array
+ required:
+ - passwordSecretRef
+ type: object
+ type: array
+ version:
+ description: Version of the ClickHouse server software.
+ type: string
+ zookeeper:
+ description: Configuration of the ZooKeeper subcluster. The structure
+ is documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts of the ZooKeeper
+ subcluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a
+ ZooKeeper host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of ZooKeeper hosts.
+ For more information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ managementPolicies:
+ default:
+ - '*'
+ description: |-
+ THIS IS A BETA FIELD. It is on by default but can be opted out
+ through a Crossplane feature flag.
+ ManagementPolicies specify the array of actions Crossplane is allowed to
+ take on the managed and external resources.
+ This field is planned to replace the DeletionPolicy field in a future
+ release. Currently, both could be set independently and non-default
+ values would be honored if the feature flag is enabled. If both are
+ custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+ and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+ items:
+ description: |-
+ A ManagementAction represents an action that the Crossplane controllers
+ can take on an external resource.
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.host is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.host) + || (has(self.initProvider) && has(self.initProvider.host))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ClickhouseClusterStatus defines the observed state of ClickhouseCluster. + properties: + atProvider: + properties: + access: + description: Access policy to the ClickHouse cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for DataLens. Can be either true + or false. + type: boolean + dataTransfer: + description: Allow access for DataTransfer. Can be either + true or false. + type: boolean + metrika: + description: Allow access for Yandex.Metrika. Can be either + true or false. + type: boolean + serverless: + description: Allow access for Serverless. Can be either + true or false. + type: boolean + webSql: + description: Allow access for Web SQL. Can be either true + or false. + type: boolean + yandexQuery: + description: Allow access for YandexQuery. Can be either + true or false. + type: boolean + type: object + type: array + backupRetainPeriodDays: + description: The period in days during which backups are stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC timezone. + The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + clickhouse: + description: Configuration of the ClickHouse subcluster. The structure + is documented below. + items: + properties: + config: + description: Main ClickHouse cluster configuration. 
+ items:
+ properties:
+ asynchronousInsertLogEnabled:
+ type: boolean
+ asynchronousInsertLogRetentionSize:
+ type: number
+ asynchronousInsertLogRetentionTime:
+ type: number
+ asynchronousMetricLogEnabled:
+ type: boolean
+ asynchronousMetricLogRetentionSize:
+ type: number
+ asynchronousMetricLogRetentionTime:
+ type: number
+ backgroundBufferFlushSchedulePoolSize:
+ type: number
+ backgroundCommonPoolSize:
+ type: number
+ backgroundDistributedSchedulePoolSize:
+ type: number
+ backgroundFetchesPoolSize:
+ type: number
+ backgroundMergesMutationsConcurrencyRatio:
+ type: number
+ backgroundMessageBrokerSchedulePoolSize:
+ type: number
+ backgroundMovePoolSize:
+ type: number
+ backgroundPoolSize:
+ type: number
+ backgroundSchedulePoolSize:
+ type: number
+ compression:
+ description: Data compression configuration. The structure
+ is documented below.
+ items:
+ properties:
+ level:
+ description: Compression level for ZSTD method.
+ type: number
+ method:
+ description: 'Method: Compression method. Two
+ methods are available: LZ4 and zstd.'
+ type: string
+ minPartSize:
+ description: 'Min part size: Minimum size (in
+ bytes) of a data part in a table. ClickHouse
+ only applies the rule to tables with data
+ parts greater than or equal to the Min part
+ size value.'
+ type: number
+ minPartSizeRatio:
+ description: 'Min part size ratio: Minimum table
+ part size to total table size ratio. ClickHouse
+ only applies the rule to tables in which this
+ ratio is greater than or equal to the Min
+ part size ratio value.'
+ type: number
+ type: object
+ type: array
+ defaultDatabase:
+ description: The default database of the ClickHouse
+ cluster.
+ type: string
+ dictionariesLazyLoad:
+ type: boolean
+ geobaseEnabled:
+ type: boolean
+ geobaseUri:
+ type: string
+ graphiteRollup:
+ description: Graphite rollup configuration. The structure
+ is documented below.
+ items:
+ properties:
+ name:
+ description: The name of the Graphite rollup
+ configuration.
+ type: string
+ pathColumnName:
+ description: 'The name of the column storing
+ the metric name (Graphite sensor). Default
+ value: Path.'
+ type: string
+ pattern:
+ description: Set of thinning rules.
+ items:
+ properties:
+ function:
+ description: Aggregation function name.
+ type: string
+ regexp:
+ description: Regular expression that the
+ metric name must match.
+ type: string
+ retention:
+ description: Retain parameters.
+ items:
+ properties:
+ age:
+ description: Minimum data age in
+ seconds.
+ type: number
+ precision:
+ description: Accuracy of determining
+ the age of the data in seconds.
+ type: number
+ type: object
+ type: array
+ type: object
+ type: array
+ timeColumnName:
+ description: 'The name of the column storing
+ the time of measuring the metric. Default
+ value: Time.'
+ type: string
+ valueColumnName:
+ description: 'The name of the column storing
+ the value of the metric at the time set in
+ time_column_name. Default value: Value.'
+ type: string
+ versionColumnName:
+ description: 'The name of the column storing
+ the version of the metric. Default value:
+ Timestamp.'
+ type: string
+ type: object
+ type: array
+ kafka:
+ description: Kafka connection configuration. The structure
+ is documented below.
+ items:
+ properties:
+ autoOffsetReset:
+ description: 'Action to take when there is no
+ initial offset in offset store or the desired
+ offset is out of range: ''smallest'',''earliest''
+ - automatically reset the offset to the smallest
+ offset, ''largest'',''latest'' - automatically
+ reset the offset to the largest offset, ''error''
+ - trigger an error (ERR__AUTO_OFFSET_RESET)
+ which is retrieved by consuming messages and
+ checking ''message->err''.'
+ type: string
+ debug:
+ description: A comma-separated list of debug
+ contexts to enable.
+ type: string
+ enableSslCertificateVerification:
+ description: Enable verification of SSL certificates.
+ type: boolean
+ maxPollIntervalMs:
+ description: Maximum allowed time between calls
+ to consume messages (e.g., rd_kafka_consumer_poll())
+ for high-level consumers. If this interval
+ is exceeded the consumer is considered failed
+ and the group will rebalance in order to reassign
+ the partitions to another consumer group member.
+ type: number
+ saslMechanism:
+ description: SASL mechanism used in kafka authentication.
+ type: string
+ saslUsername:
+ description: Username on kafka server.
+ type: string
+ securityProtocol:
+ description: Security protocol used to connect
+ to kafka server.
+ type: string
+ sessionTimeoutMs:
+ description: Client group session and failure
+ detection timeout. The consumer sends periodic
+ heartbeats (heartbeat.interval.ms) to indicate
+ its liveness to the broker. If no heartbeats
+ are received by the broker for a group member
+ within the session timeout, the broker will
+ remove the consumer from the group and trigger
+ a rebalance.
+ type: number
+ type: object
+ type: array
+ kafkaTopic:
+ description: Kafka topic connection configuration.
+ The structure is documented below.
+ items:
+ properties:
+ name:
+ description: The name of the topic.
+ type: string
+ settings:
+ description: Custom settings for the topic. The list
+ is documented below.
+ items:
+ properties:
+ autoOffsetReset:
+ description: 'Action to take when there
+ is no initial offset in offset store
+ or the desired offset is out of range:
+ ''smallest'',''earliest'' - automatically
+ reset the offset to the smallest offset,
+ ''largest'',''latest'' - automatically
+ reset the offset to the largest offset,
+ ''error'' - trigger an error (ERR__AUTO_OFFSET_RESET)
+ which is retrieved by consuming messages
+ and checking ''message->err''.'
+ type: string
+ debug:
+ description: A comma-separated list of
+ debug contexts to enable.
+ type: string
+ enableSslCertificateVerification:
+ description: Enable verification of SSL
+ certificates.
+ type: boolean
+ maxPollIntervalMs:
+ description: Maximum allowed time between
+ calls to consume messages (e.g., rd_kafka_consumer_poll())
+ for high-level consumers. If this interval
+ is exceeded the consumer is considered
+ failed and the group will rebalance
+ in order to reassign the partitions
+ to another consumer group member.
+ type: number
+ saslMechanism:
+ description: SASL mechanism used in kafka
+ authentication.
+ type: string
+ saslUsername:
+ description: Username on kafka server.
+ type: string
+ securityProtocol:
+ description: Security protocol used to
+ connect to kafka server.
+ type: string
+ sessionTimeoutMs:
+ description: Client group session and
+ failure detection timeout. The consumer
+ sends periodic heartbeats (heartbeat.interval.ms)
+ to indicate its liveness to the broker.
+ If no heartbeats are received by the broker + for a group member within the session + timeout, the broker will remove the + consumer from the group and trigger + a rebalance. + type: number + type: object + type: array + type: object + type: array + keepAliveTimeout: + type: number + logLevel: + description: ClickHouse server logging level. For more + information, see the official documentation. + type: string + markCacheSize: + type: number + maxConcurrentQueries: + type: number + maxConnections: + type: number + maxPartitionSizeToDrop: + type: number + maxTableSizeToDrop: + type: number + mergeTree: + description: MergeTree engine configuration. The structure + is documented below. + items: + properties: + allowRemoteFsZeroCopyReplication: + description: Enables zero-copy replication when a + replica is located on a remote filesystem. + type: boolean + checkSampleColumnIsCorrect: + description: 'Enables the check at table creation, + that the data type of a column for sampling + or sampling expression is correct. The data + type must be one of unsigned integer types: + UInt8, UInt16, UInt32, UInt64. Default value: + true.' + type: boolean + cleanupDelayPeriod: + description: Minimum period to clean old queue + logs, blocks hashes and parts. + type: number + inactivePartsToDelayInsert: + description: If the number of inactive parts + in a single partition in the table reaches at + least the inactive_parts_to_delay_insert + value, an INSERT is artificially slowed down. + It is useful when a server fails to clean + up parts quickly enough. + type: number + inactivePartsToThrowInsert: + description: If the number of inactive parts + in a single partition is more than the inactive_parts_to_throw_insert + value, INSERT is interrupted with the "Too + many inactive parts (N). Parts cleaning are + processing significantly slower than inserts" + exception. + type: number + maxAvgPartSizeForTooManyParts: + description: The too many parts check according + to parts_to_delay_insert and parts_to_throw_insert + will be active only if the average part size + (in the relevant partition) is not larger + than the specified threshold. If it is larger + than the specified threshold, the INSERTs + will be neither delayed nor rejected. This + makes it possible to have hundreds of terabytes in a + single table on a single server if the parts + are successfully merged to larger parts. This + does not affect the thresholds on inactive + parts or total parts. + type: number + maxBytesToMergeAtMaxSpaceInPool: + description: The maximum total parts size (in + bytes) to be merged into one part, if there + are enough resources available. max_bytes_to_merge_at_max_space_in_pool + -- roughly corresponds to the maximum possible + part size created by an automatic background + merge. + type: number + maxBytesToMergeAtMinSpaceInPool: + description: 'Max bytes to merge at min space + in pool: Maximum total size of a data part + to merge when the number of free threads in + the background pool is minimum.' + type: number + maxCleanupDelayPeriod: + description: 'Maximum period to clean old queue + logs, blocks hashes and parts. Default value: + 300 seconds.' + type: number + maxMergeSelectingSleepMs: + description: 'Maximum sleep time for merge selecting; + a lower setting will trigger selecting tasks + in background_schedule_pool frequently, which + results in a large number of requests to zookeeper + in large-scale clusters.
Default value: 60000 + milliseconds (60 seconds).' + type: number + maxNumberOfMergesWithTtlInPool: + description: When there is more than specified + number of merges with TTL entries in pool, + do not assign new merge with TTL. + type: number + maxPartsInTotal: + description: Maximum number of parts in all + partitions. + type: number + maxReplicatedMergesInQueue: + description: 'Max replicated merges in queue: + Maximum number of merge tasks that can be + in the ReplicatedMergeTree queue at the same + time.' + type: number + mergeMaxBlockSize: + description: 'The number of rows that are read + from the merged parts into memory. Default + value: 8192.' + type: number + mergeSelectingSleepMs: + description: Sleep time for merge selecting + when no part is selected. A lower setting + triggers selecting tasks in background_schedule_pool + frequently, which results in a large number + of requests to ClickHouse Keeper in large-scale + clusters. + type: number + mergeWithRecompressionTtlTimeout: + description: 'Minimum delay in seconds before + repeating a merge with recompression TTL. + Default value: 14400 seconds (4 hours).' + type: number + mergeWithTtlTimeout: + description: 'Minimum delay in seconds before + repeating a merge with delete TTL. Default + value: 14400 seconds (4 hours).' + type: number + minAgeToForceMergeOnPartitionOnly: + description: Whether min_age_to_force_merge_seconds + should be applied only on the entire partition + and not on a subset. + type: boolean + minAgeToForceMergeSeconds: + description: Merge parts if every part in the + range is older than the value of min_age_to_force_merge_seconds. + type: number + minBytesForWidePart: + description: Minimum number of bytes in a data + part that can be stored in Wide format. You + can set one, both or none of these settings. + type: number + minRowsForWidePart: + description: Minimum number of rows in a data + part that can be stored in Wide format. You + can set one, both or none of these settings. + type: number + numberOfFreeEntriesInPoolToExecuteMutation: + description: 'When there is less than specified + number of free entries in pool, do not execute + part mutations. This is to leave free threads + for regular merges and avoid "Too many parts". + Default value: 20.' + type: number + numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: + description: 'Number of free entries in pool + to lower max size of merge: Threshold value + of free entries in the pool. If the number + of entries in the pool falls below this value, + ClickHouse reduces the maximum size of a data + part to merge. This helps handle small merges + faster, rather than filling the pool with + lengthy merges.' + type: number + partsToDelayInsert: + description: 'Parts to delay insert: Number + of active data parts in a table, on exceeding + which ClickHouse starts to artificially reduce + the rate of inserting data into the table.' + type: number + partsToThrowInsert: + description: 'Parts to throw insert: Threshold + value of active data parts in a table, on + exceeding which ClickHouse throws the ''Too + many parts ...'' exception.' + type: number + replicatedDeduplicationWindow: + description: 'Replicated deduplication window: + Number of recent hash blocks that ZooKeeper + will store (the old ones will be deleted).' + type: number + replicatedDeduplicationWindowSeconds: + description: 'Replicated deduplication window + seconds: Time during which ZooKeeper stores + the hash blocks (the old ones will be deleted).'
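# Editorial note: a sketch of tuning the mergeTree settings documented above in
# a manifest. The values are hypothetical illustrations, not recommendations;
# the one-element list nesting follows the items/array convention of the schema:
#
#   clickhouse:
#     - config:
#         - mergeTree:
#             - partsToDelayInsert: 150
#               partsToThrowInsert: 300
#               replicatedDeduplicationWindow: 100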
+ type: number + ttlOnlyDropParts: + description: Enables or disables complete dropping of + data parts where all rows are expired in MergeTree + tables. + type: boolean + type: object + type: array + metricLogEnabled: + type: boolean + metricLogRetentionSize: + type: number + metricLogRetentionTime: + type: number + opentelemetrySpanLogEnabled: + type: boolean + opentelemetrySpanLogRetentionSize: + type: number + opentelemetrySpanLogRetentionTime: + type: number + partLogRetentionSize: + type: number + partLogRetentionTime: + type: number + queryCache: + description: Query cache configuration. The structure + is documented below. + items: + properties: + maxEntries: + description: 'The maximum number of SELECT query + results stored in the cache. Default value: + 1024.' + type: number + maxEntrySizeInBytes: + description: 'The maximum size in bytes SELECT + query results may have to be saved in the + cache. Default value: 1048576 (1 MiB).' + type: number + maxEntrySizeInRows: + description: 'The maximum number of rows SELECT + query results may have to be saved in the + cache. Default value: 30000000 (30 million).' + type: number + maxSizeInBytes: + description: 'The maximum cache size in bytes. + 0 means the query cache is disabled. Default + value: 1073741824 (1 GiB).' + type: number + type: object + type: array + queryLogRetentionSize: + type: number + queryLogRetentionTime: + type: number + queryMaskingRules: + description: Query masking rules configuration. The + structure is documented below. + items: + properties: + name: + description: The name of the query masking rule. + type: string + regexp: + description: RE2-compatible regular expression that + the sensitive data must match. + type: string + replace: + description: 'Substitution string for sensitive + data. Default value: six asterisks.' + type: string + type: object + type: array + queryThreadLogEnabled: + type: boolean + queryThreadLogRetentionSize: + type: number + queryThreadLogRetentionTime: + type: number + queryViewsLogEnabled: + type: boolean + queryViewsLogRetentionSize: + type: number + queryViewsLogRetentionTime: + type: number + rabbitmq: + description: RabbitMQ connection configuration. The + structure is documented below. + items: + properties: + username: + description: RabbitMQ username. + type: string + vhost: + description: 'RabbitMQ vhost. Default value: ''/''.' + type: string + type: object + type: array + sessionLogEnabled: + type: boolean + sessionLogRetentionSize: + type: number + sessionLogRetentionTime: + type: number + textLogEnabled: + type: boolean + textLogLevel: + type: string + textLogRetentionSize: + type: number + textLogRetentionTime: + type: number + timezone: + type: string + totalMemoryProfilerStep: + type: number + traceLogEnabled: + type: boolean + traceLogRetentionSize: + type: number + traceLogRetentionTime: + type: number + uncompressedCacheSize: + type: number + zookeeperLogEnabled: + type: boolean + zookeeperLogRetentionSize: + type: number + zookeeperLogRetentionTime: + type: number + type: object + type: array + resources: + description: Resources allocated to hosts of the ClickHouse + subcluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a + ClickHouse host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of ClickHouse hosts. + For more information see the official documentation.
+ type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + cloudStorage: + description: Cloud Storage configuration of the ClickHouse cluster. + The structure is documented below. + items: + properties: + dataCacheEnabled: + description: Enables temporary storage in the cluster storage + of data requested from the object storage. + type: boolean + dataCacheMaxSize: + description: Defines the maximum amount of memory (in bytes) + allocated in the cluster storage for temporary storage + of data requested from the object storage. + type: number + enabled: + description: Whether to use Yandex Object Storage for storing + ClickHouse data. Can be either true or false. + type: boolean + moveFactor: + description: Sets the minimum free space ratio in the cluster + storage. If the free space is lower than this value, the + data is transferred to Yandex Object Storage. Acceptable + values are 0 to 1, inclusive. + type: number + preferNotToMerge: + description: Disables merging of data parts in Yandex Object + Storage. + type: boolean + type: object + type: array + clusterId: + type: string + copySchemaOnNewHosts: + description: Whether to copy schema on new ClickHouse hosts. + type: boolean + createdAt: + description: Timestamp of cluster creation. + type: string + database: + description: A database of the ClickHouse cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the ClickHouse cluster. + type: string + embeddedKeeper: + description: Whether to use ClickHouse Keeper as a coordination + system and place it on the same hosts with ClickHouse. If not, + ZooKeeper is used and placed on separate hosts. + type: boolean + environment: + description: Deployment environment of the ClickHouse cluster. + Can be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + formatSchema: + description: A set of protobuf or capnproto format schemas. The + structure is documented below. + items: + properties: + name: + description: The name of the format schema. + type: string + type: + description: Type of the format schema. + type: string + uri: + description: Format schema file URL. You can only use format + schemas stored in Yandex Object Storage. + type: string + type: object + type: array + health: + description: Aggregated health of the cluster. Can be ALIVE, DEGRADED, + DEAD or HEALTH_UNKNOWN. For more information see health field + of JSON representation in the official documentation. + type: string + host: + description: A host of the ClickHouse cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Can be either true or false. + type: boolean + fqdn: + description: (Computed) The fully qualified domain name + of the host. + type: string + shardName: + description: The name of the shard to which the host belongs. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + type: + description: The type of the host to be deployed.
Can be + either CLICKHOUSE or ZOOKEEPER. + type: string + zone: + description: The availability zone where the ClickHouse + host will be created. For more information see the official + documentation. + type: string + type: object + type: array + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the ClickHouse + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + mlModel: + description: A group of machine learning models. The structure + is documented below. + items: + properties: + name: + description: The name of the ML model. + type: string + type: + description: Type of the model. + type: string + uri: + description: Model file URL. You can only use models stored + in Yandex Object Storage. + type: string + type: object + type: array + name: + description: Name of the ClickHouse cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the ClickHouse cluster + belongs. + type: string + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: ID of the service account used for access to Yandex + Object Storage. + type: string + shard: + items: + properties: + name: + description: The name of the shard. + type: string + resources: + description: Resources allocated to hosts of the shard. The + resources specified for the shard take precedence over + the resources specified for the cluster. The structure + is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a + host of the shard, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of hosts of the shard. + For more information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + weight: + description: The weight of the shard. + type: number + type: object + type: array + shardGroup: + description: A group of ClickHouse shards. The structure is documented + below. + items: + properties: + description: + description: Description of the shard group. + type: string + name: + description: The name of the shard group, used as cluster + name in Distributed tables. + type: string + shardNames: + description: List of shard names that belong to the shard + group. + items: + type: string + type: array + type: object + type: array + sqlDatabaseManagement: + description: Grants admin user database management permission. + type: boolean + sqlUserManagement: + description: Enables admin user with user management permission. + type: boolean + status: + description: Status of the cluster. Can be CREATING, STARTING, + RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. + For more information see status field of JSON representation + in the official documentation.
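# Editorial note: per the maintenanceWindow schema above, a weekly window would
# look roughly like this in a manifest (day and hour values are hypothetical;
# hour is in UTC, 1-24, per the field description):
#
#   maintenanceWindow:
#     - type: WEEKLY
#       day: MON
#       hour: 3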
+ type: string + user: + description: A user of the ClickHouse cluster. The structure is + documented below. + items: + properties: + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + type: object + type: array + quota: + description: Set of user quotas. The structure is documented + below. + items: + properties: + errors: + description: The number of queries that threw an exception. + type: number + executionTime: + description: The total query execution time, in milliseconds + (wall time). + type: number + intervalDuration: + description: Duration of interval for quota in milliseconds. + type: number + queries: + description: The total number of queries. + type: number + readRows: + description: The total number of source rows read + from tables for running the query, on all remote + servers. + type: number + resultRows: + description: The total number of rows given as the + result. + type: number + type: object + type: array + settings: + description: Custom settings for the user. The list is documented + below. + items: + properties: + addHttpCorsHeader: + description: Include CORS headers in HTTP responses. + type: boolean + allowDdl: + description: Allows or denies DDL queries. + type: boolean + allowIntrospectionFunctions: + description: Enables introspection functions for + query profiling. + type: boolean + allowSuspiciousLowCardinalityTypes: + description: Allows specifying LowCardinality modifier + for types of small fixed size (8 or less) in CREATE + TABLE statements. Enabling this may increase merge + times and memory consumption. + type: boolean + anyJoinDistinctRightTableKeys: + description: Enables legacy ClickHouse server behaviour + in ANY INNER|LEFT JOIN operations. + type: boolean + asyncInsert: + description: Enables asynchronous inserts. Disabled + by default. + type: boolean + asyncInsertBusyTimeout: + description: 'The maximum timeout in milliseconds + since the first INSERT query before inserting collected + data. If the parameter is set to 0, the timeout + is disabled. Default value: 200.' + type: number + asyncInsertMaxDataSize: + description: 'The maximum size of the unparsed data + in bytes collected per query before being inserted. + If the parameter is set to 0, asynchronous insertions + are disabled. Default value: 100000.' + type: number + asyncInsertStaleTimeout: + description: The maximum timeout in milliseconds since + the last INSERT query before dumping collected data. + If enabled, the setting prolongs the async_insert_busy_timeout + with every INSERT query as long as async_insert_max_data_size + is not exceeded. + type: number + asyncInsertThreads: + description: 'The maximum number of threads for background + data parsing and insertion. If the parameter is + set to 0, asynchronous insertions are disabled. + Default value: 16.' + type: number + cancelHttpReadonlyQueriesOnClientClose: + description: 'Cancels HTTP read-only queries (e.g. + SELECT) when a client closes the connection without + waiting for the response. Default value: false.' + type: boolean + compile: + description: Enable compilation of queries. + type: boolean + compileExpressions: + description: Turn on expression compilation.
+ type: boolean + connectTimeout: + description: Connect timeout in milliseconds on the + socket used for communicating with the client. + type: number + connectTimeoutWithFailover: + description: 'The timeout in milliseconds for connecting + to a remote server for a Distributed table engine, + if the ‘shard’ and ‘replica’ sections are used in + the cluster definition. If unsuccessful, several + attempts are made to connect to various replicas. + Default value: 50.' + type: number + countDistinctImplementation: + description: Specifies which of the uniq* functions + should be used to perform the COUNT(DISTINCT …) + construction. + type: string + dateTimeInputFormat: + description: 'Allows choosing a parser of the text + representation of date and time, one of: best_effort, + basic, best_effort_us. Default value: basic. Cloud + default value: best_effort.' + type: string + dateTimeOutputFormat: + description: 'Allows choosing different output formats + of the text representation of date and time, one + of: simple, iso, unix_timestamp. Default value: + simple.' + type: string + deduplicateBlocksInDependentMaterializedViews: + description: Enables or disables the deduplication + check for materialized views that receive data from + Replicated* tables. + type: boolean + distinctOverflowMode: + description: 'Sets behaviour on overflow when using + DISTINCT. Possible values:' + type: string + distributedAggregationMemoryEfficient: + description: Determines the behavior of distributed + subqueries. + type: boolean + distributedDdlTaskTimeout: + description: Timeout for DDL queries, in milliseconds. + type: number + distributedProductMode: + description: Changes the behaviour of distributed + subqueries. + type: string + emptyResultForAggregationByEmptySet: + description: Allows returning an empty result. + type: boolean + enableHttpCompression: + description: Enables or disables data compression + in the response to an HTTP request. + type: boolean + fallbackToStaleReplicasForDistributedQueries: + description: Forces a query to an out-of-date replica + if updated data is not available. + type: boolean + flattenNested: + description: Sets the data format of nested columns. + type: boolean + forceIndexByDate: + description: Disables query execution if the index + can’t be used by date. + type: boolean + forcePrimaryKey: + description: Disables query execution if indexing + by the primary key is not possible. + type: boolean + formatRegexp: + description: Regular expression (for Regexp format). + type: string + formatRegexpSkipUnmatched: + description: Skip lines unmatched by regular expression. + type: boolean + groupByOverflowMode: + description: 'Sets behaviour on overflow while GROUP + BY operation. Possible values:' + type: string + groupByTwoLevelThreshold: + description: Sets the threshold of the number of keys, + after that the two-level aggregation should be used. + type: number + groupByTwoLevelThresholdBytes: + description: Sets the threshold of the number of bytes, + after that the two-level aggregation should be used. + type: number + hedgedConnectionTimeoutMs: + description: 'Connection timeout for establishing + connection with replica for Hedged requests. Default + value: 50 milliseconds.' + type: number + httpConnectionTimeout: + description: Timeout for HTTP connection in milliseconds. + type: number + httpHeadersProgressInterval: + description: Sets minimal interval between notifications + about request process in HTTP header X-ClickHouse-Progress.
+ type: number + httpReceiveTimeout: + description: Timeout in milliseconds for receiving data + over an HTTP connection. + type: number + httpSendTimeout: + description: Timeout in milliseconds for sending data + over an HTTP connection. + type: number + idleConnectionTimeout: + description: 'Timeout to close idle TCP connections + after specified number of seconds. Default value: + 3600 seconds.' + type: number + inputFormatDefaultsForOmittedFields: + description: When performing INSERT queries, replace + omitted input column values with default values + of the respective columns. + type: boolean + inputFormatImportNestedJson: + description: Enables or disables the insertion of + JSON data with nested objects. + type: boolean + inputFormatNullAsDefault: + description: Enables or disables the initialization + of NULL fields with default values, if the data type + of these fields is not nullable. + type: boolean + inputFormatParallelParsing: + description: Enables or disables order-preserving + parallel parsing of data formats. Supported only + for TSV, TSKV, CSV and JSONEachRow formats. + type: boolean + inputFormatValuesInterpretExpressions: + description: Enables or disables the full SQL parser + if the fast stream parser can’t parse the data. + type: boolean + inputFormatWithNamesUseHeader: + description: Enables or disables checking the column + order when inserting data. + type: boolean + insertKeeperMaxRetries: + description: Sets the maximum number of + retries for ClickHouse Keeper (or ZooKeeper) requests + during insert into replicated MergeTree. Only Keeper + requests which failed due to network error, Keeper + session timeout, or request timeout are considered + for retries. + type: number + insertNullAsDefault: + description: 'Enables the insertion of default values + instead of NULL into columns with a non-nullable data + type. Default value: true.' + type: boolean + insertQuorum: + description: Enables the quorum writes. + type: number + insertQuorumParallel: + description: Enables or disables parallelism for quorum + INSERT queries. + type: boolean + insertQuorumTimeout: + description: Write to a quorum timeout in milliseconds. + type: number + joinAlgorithm: + description: 'Specifies which JOIN algorithm is used. + Possible values:' + items: + type: string + type: array + joinOverflowMode: + description: 'Sets behaviour on overflow in JOIN. + Possible values:' + type: string + joinUseNulls: + description: Sets the type of JOIN behaviour. When + merging tables, empty cells may appear. ClickHouse + fills them differently based on this setting. + type: boolean + joinedSubqueryRequiresAlias: + description: Requires aliases for subselects and table + functions in FROM clauses that contain more than one table. + type: boolean + loadBalancing: + description: 'Specifies the algorithm of replicas + selection that is used for distributed query processing, + one of: random, nearest_hostname, in_order, first_or_random, + round_robin. Default value: random.' + type: string + localFilesystemReadMethod: + description: 'Method of reading data from local filesystem. + Possible values:' + type: string + logQueryThreads: + description: 'Setting up query threads logging. Query + threads log into the system.query_thread_log table. + This setting has effect only when log_queries is + true. Queries’ threads run by ClickHouse with this + setup are logged according to the rules in the query_thread_log + server configuration parameter. Default value: true.'
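# Editorial note: a sketch of how a few of the per-user settings documented in
# this block combine in a manifest. The user name and values are hypothetical;
# joinAlgorithm is a list of strings per the schema above:
#
#   user:
#     - name: reporting
#       settings:
#         - joinAlgorithm:
#             - hash
#           inputFormatParallelParsing: true
#           maxInsertThreads: 4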
+ type: boolean + lowCardinalityAllowInNativeFormat: + description: Allows or restricts using the LowCardinality + data type with the Native format. + type: boolean + maxAstDepth: + description: Maximum abstract syntax tree depth. + type: number + maxAstElements: + description: Maximum abstract syntax tree elements. + type: number + maxBlockSize: + description: A recommendation for what size of the + block (in a count of rows) to load from tables. + type: number + maxBytesBeforeExternalGroupBy: + description: Limit in bytes for using memory for GROUP + BY before using swap on disk. + type: number + maxBytesBeforeExternalSort: + description: This setting is the equivalent of the max_bytes_before_external_group_by + setting, except that it is for the sort operation (ORDER + BY), not aggregation. + type: number + maxBytesInDistinct: + description: Limits the maximum size of a hash table + in bytes (uncompressed data) when using DISTINCT. + type: number + maxBytesInJoin: + description: Limit on maximum size of the hash table + for JOIN, in bytes. + type: number + maxBytesInSet: + description: Limit on the number of bytes in the set + resulting from the execution of the IN section. + type: number + maxBytesToRead: + description: Limits the maximum number of bytes (uncompressed + data) that can be read from a table when running + a query. + type: number + maxBytesToSort: + description: Limits the maximum number of bytes (uncompressed + data) that can be read from a table for sorting. + type: number + maxBytesToTransfer: + description: Limits the maximum number of bytes (uncompressed + data) that can be passed to a remote server or saved + in a temporary table when using GLOBAL IN. + type: number + maxColumnsToRead: + description: Limits the maximum number of columns + that can be read from a table in a single query. + type: number + maxConcurrentQueriesForUser: + description: 'The maximum number of concurrent requests + per user. Default value: 0 (no limit).' + type: number + maxExecutionTime: + description: Limits the maximum query execution time + in milliseconds. + type: number + maxExpandedAstElements: + description: Maximum abstract syntax tree elements + after expansion of aliases. + type: number + maxFinalThreads: + description: Sets the maximum number of parallel threads + for the SELECT query data read phase with the FINAL + modifier. + type: number + maxHttpGetRedirects: + description: Limits the maximum number of HTTP GET + redirect hops for URL-engine tables. + type: number + maxInsertBlockSize: + description: The size of blocks (in a count of rows) + to form for insertion into a table. + type: number + maxInsertThreads: + description: 'The maximum number of threads to execute + the INSERT SELECT query. Default value: 0.' + type: number + maxMemoryUsage: + description: Limits the maximum memory usage (in bytes) + for processing queries on a single server. + type: number + maxMemoryUsageForUser: + description: Limits the maximum memory usage (in bytes) + for processing of user's queries on a single server. + type: number + maxNetworkBandwidth: + description: Limits the speed of the data exchange + over the network in bytes per second. + type: number + maxNetworkBandwidthForUser: + description: Limits the speed of the data exchange + over the network in bytes per second. + type: number + maxParserDepth: + description: Limits maximum recursion depth in the + recursive descent parser. Allows controlling the + stack size. Zero means unlimited.
+ type: number + maxQuerySize: + description: The maximum part of a query that can + be taken to RAM for parsing with the SQL parser. + type: number + maxReadBufferSize: + description: The maximum size of the buffer to read + from the filesystem. + type: number + maxReplicaDelayForDistributedQueries: + description: Disables lagging replicas for distributed + queries. + type: number + maxResultBytes: + description: Limits the number of bytes in the result. + type: number + maxResultRows: + description: Limits the number of rows in the result. + type: number + maxRowsInDistinct: + description: Limits the maximum number of different + rows when using DISTINCT. + type: number + maxRowsInJoin: + description: Limit on maximum size of the hash table + for JOIN, in rows. + type: number + maxRowsInSet: + description: Limit on the number of rows in the set + resulting from the execution of the IN section. + type: number + maxRowsToGroupBy: + description: Limits the maximum number of unique keys + received from aggregation function. + type: number + maxRowsToRead: + description: Limits the maximum number of rows that + can be read from a table when running a query. + type: number + maxRowsToSort: + description: Limits the maximum number of rows that + can be read from a table for sorting. + type: number + maxRowsToTransfer: + description: Limits the maximum number of rows that + can be passed to a remote server or saved in a temporary + table when using GLOBAL IN. + type: number + maxTemporaryColumns: + description: Limits the maximum number of temporary + columns that must be kept in RAM at the same time + when running a query, including constant columns. + type: number + maxTemporaryDataOnDiskSizeForQuery: + description: The maximum amount of data consumed by + temporary files on disk in bytes for all concurrently + running queries. Zero means unlimited. + type: number + maxTemporaryDataOnDiskSizeForUser: + description: The maximum amount of data consumed by + temporary files on disk in bytes for all concurrently + running user queries. Zero means unlimited. + type: number + maxTemporaryNonConstColumns: + description: Limits the maximum number of temporary + columns that must be kept in RAM at the same time + when running a query, excluding constant columns. + type: number + maxThreads: + description: The maximum number of query processing + threads, excluding threads for retrieving data from + remote servers. + type: number + memoryOvercommitRatioDenominator: + description: It represents the soft memory limit when + the hard limit is reached on the user level. This value + is used to compute the overcommit ratio for the query. + Zero means skip the query. + type: number + memoryOvercommitRatioDenominatorForUser: + description: It represents the soft memory limit when + the hard limit is reached on the global level. This + value is used to compute the overcommit ratio for the + query. Zero means skip the query. + type: number + memoryProfilerSampleProbability: + description: 'Collect random allocations and deallocations + and write them into system.trace_log with ''MemorySample'' + trace_type. The probability is for every alloc/free + regardless of the size of the allocation. Possible + values: from 0 to 1. Default: 0.' + type: number + memoryProfilerStep: + description: 'Memory profiler step (in bytes). If + the next query step requires more memory than this + parameter specifies, the memory profiler collects + the allocating stack trace. Values lower than a + few megabytes slow down query processing.
Default + value: 4194304 (4 MB). Zero means the memory profiler + is disabled.' + type: number + memoryUsageOvercommitMaxWaitMicroseconds: + description: Maximum time a thread will wait for memory + to be freed in the case of memory overcommit on + a user level. If the timeout is reached and memory + is not freed, an exception is thrown. + type: number + mergeTreeMaxBytesToUseCache: + description: If ClickHouse should read more than merge_tree_max_bytes_to_use_cache + bytes in one query, it doesn’t use the cache of + uncompressed blocks. + type: number + mergeTreeMaxRowsToUseCache: + description: If ClickHouse should read more than merge_tree_max_rows_to_use_cache + rows in one query, it doesn’t use the cache of uncompressed + blocks. + type: number + mergeTreeMinBytesForConcurrentRead: + description: If the number of bytes to read from one + file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, + then ClickHouse tries to concurrently read from + this file in several threads. + type: number + mergeTreeMinRowsForConcurrentRead: + description: If the number of rows to be read from + a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read + then ClickHouse tries to perform a concurrent reading + from this file on several threads. + type: number + minBytesToUseDirectIo: + description: The minimum data volume required for + using direct I/O access to the storage disk. + type: number + minCountToCompile: + description: How many times to potentially use a compiled + chunk of code before running compilation. + type: number + minCountToCompileExpression: + description: A query waits for expression compilation + process to complete prior to continuing execution. + type: number + minExecutionSpeed: + description: Minimal execution speed in rows per second. + type: number + minExecutionSpeedBytes: + description: Minimal execution speed in bytes per + second. + type: number + minInsertBlockSizeBytes: + description: Sets the minimum number of bytes in the + block which can be inserted into a table by an INSERT + query. + type: number + minInsertBlockSizeRows: + description: Sets the minimum number of rows in the + block which can be inserted into a table by an INSERT + query. + type: number + outputFormatJsonQuote64BitIntegers: + description: If the value is true, integers appear + in quotes when using JSON* Int64 and UInt64 formats + (for compatibility with most JavaScript implementations); + otherwise, integers are output without the quotes. + type: boolean + outputFormatJsonQuoteDenormals: + description: Enables +nan, -nan, +inf, -inf outputs + in JSON output format. + type: boolean + preferLocalhostReplica: + description: 'Enables or disables preferring the + localhost replica when processing distributed queries. + Default value: true.' + type: boolean + priority: + description: Query priority. + type: number + quotaMode: + description: Quota accounting mode. + type: string + readOverflowMode: + description: 'Sets behaviour on overflow while read. + Possible values:' + type: string + readonly: + description: Restricts permissions for reading data, + writing data and changing settings queries. + type: number + receiveTimeout: + description: Receive timeout in milliseconds on the + socket used for communicating with the client. + type: number + remoteFilesystemReadMethod: + description: 'Method of reading data from remote filesystem, + one of: read, threadpool.' + type: string + replicationAlterPartitionsSync: + description: For ALTER ...
ATTACH|DETACH|DROP queries, + you can use the replication_alter_partitions_sync + setting to set up waiting. + type: number + resultOverflowMode: + description: 'Sets behaviour on overflow in result. + Possible values:' + type: string + selectSequentialConsistency: + description: Enables or disables sequential consistency + for SELECT queries. + type: boolean + sendProgressInHttpHeaders: + description: Enables or disables X-ClickHouse-Progress + HTTP response headers in clickhouse-server responses. + type: boolean + sendTimeout: + description: Send timeout in milliseconds on the socket + used for communicating with the client. + type: number + setOverflowMode: + description: 'Sets behaviour on overflow in the set + resulting. Possible values:' + type: string + skipUnavailableShards: + description: Enables or disables silently skipping + of unavailable shards. + type: boolean + sortOverflowMode: + description: 'Sets behaviour on overflow while sort. + Possible values:' + type: string + timeoutBeforeCheckingExecutionSpeed: + description: Timeout (in seconds) between checks of + execution speed. It is checked that execution speed + is not less than specified in the min_execution_speed + parameter. Must be at least 1000. + type: number + timeoutOverflowMode: + description: 'Sets behaviour on overflow. Possible + values:' + type: string + transferOverflowMode: + description: 'Sets behaviour on overflow. Possible + values:' + type: string + transformNullIn: + description: Enables equality of NULL values for IN + operator. + type: boolean + useHedgedRequests: + description: 'Enables hedged requests logic for remote + queries. It allows establishing many connections + with different replicas for a query. A new connection + is enabled if the existing connection(s) with replica(s) + were not established within hedged_connection_timeout + or no data was received within receive_data_timeout. + The query uses the first connection which sends a non-empty + progress packet (or data packet, if allow_changing_replica_until_first_data_packet); + other connections are cancelled. Queries with max_parallel_replicas + > 1 are supported. Default value: true.' + type: boolean + useUncompressedCache: + description: Whether to use a cache of uncompressed + blocks. + type: boolean + waitForAsyncInsert: + description: Enables waiting for processing of asynchronous + insertion. If enabled, the server returns OK only after + the data is inserted. + type: boolean + waitForAsyncInsertTimeout: + description: The timeout (in milliseconds) for waiting + for processing of asynchronous insertion. Value + must be at least 1000 (1 second). + type: number + type: object + type: array + type: object + type: array + version: + description: Version of the ClickHouse server software. + type: string + zookeeper: + description: Configuration of the ZooKeeper subcluster. The structure + is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the ZooKeeper + subcluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a + ZooKeeper host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of ZooKeeper hosts. + For more information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource.
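# Editorial note: the conditions array defined below is populated by Crossplane
# at runtime; a typical entry looks roughly like this (timestamp hypothetical):
#
#   status:
#     conditions:
#       - type: Ready
#         status: "True"
#         reason: Available
#         lastTransitionTime: "2024-01-01T00:00:00Z"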
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_elasticsearchclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_elasticsearchclusters.yaml new file mode 100644 index 0000000..5f2b4e0 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_elasticsearchclusters.yaml @@ -0,0 +1,1620 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: elasticsearchclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ElasticsearchCluster + listKind: ElasticsearchClusterList + plural: elasticsearchclusters + singular: elasticsearchcluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ElasticsearchCluster is the Schema for the ElasticsearchClusters + API. Manages an Elasticsearch cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated.
In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ElasticsearchClusterSpec defines the desired state of ElasticsearchCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + config: + description: Configuration of the Elasticsearch cluster. The structure + is documented below. + items: + properties: + adminPasswordSecretRef: + description: Password for admin user of Elasticsearch. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dataNode: + description: Configuration for Elasticsearch data nodes + subcluster. The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the Elasticsearch + data nodes subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Elasticsearch + hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + edition: + description: Edition of Elasticsearch. For more information, + see the official documentation. + type: string + masterNode: + description: Configuration for Elasticsearch master nodes + subcluster. The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the Elasticsearch + master nodes subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Elasticsearch + hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + plugins: + description: A set of Elasticsearch plugins to install. + items: + type: string + type: array + x-kubernetes-list-type: set + version: + description: Version of Elasticsearch. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Elasticsearch cluster. + type: string + environment: + description: Deployment environment of the Elasticsearch cluster. + Can be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used.
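# Editorial note: a minimal spec.forProvider sketch for this CRD, assembled
# from the fields above. The version, secret coordinates, resource preset and
# disk type values are hypothetical:
#
#   spec:
#     forProvider:
#       environment: PRODUCTION
#       config:
#         - version: "7.17"
#           adminPasswordSecretRef:
#             name: es-admin
#             namespace: default
#             key: password
#           dataNode:
#             - resources:
#                 - resourcePresetId: s2.micro
#                   diskTypeId: network-ssd
#                   diskSize: 100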
+ type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the Elasticsearch cluster. The structure + is documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Can be either true or false. + type: boolean + name: + description: User defined host name. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of the host to be deployed. Can be + either DATA_NODE or MASTER_NODE. + type: string + zone: + description: The availability zone where the Elasticsearch + host will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Elasticsearch + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Elasticsearch cluster. Provided by the + client when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Elasticsearch cluster + belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
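# Editorial note: the Ref/Selector pairs repeated throughout this spec are the
# standard Crossplane cross-resource reference pattern; a sketch of selecting a
# referenced object by label instead of by name (label values hypothetical):
#
#   networkIdSelector:
#     matchLabels:
#       team: data-platform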
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountId: + description: ID of the service account authorized for this cluster. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + config: + description: Configuration of the Elasticsearch cluster. The structure + is documented below. + items: + properties: + adminPasswordSecretRef: + description: Password for admin user of Elasticsearch. + properties: + key: + description: The key to select.
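The adminPasswordSecretRef block pulls the Elasticsearch admin password from a Kubernetes Secret rather than embedding it in the manifest. A minimal sketch of that pairing; the Secret name, network reference, and config values are all illustrative, not prescribed by the schema:

apiVersion: v1
kind: Secret
metadata:
  name: es-admin-password      # hypothetical Secret holding the admin password
  namespace: default
stringData:
  password: change-me          # illustrative value
---
apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: ElasticsearchCluster
metadata:
  name: example-es             # hypothetical resource name
spec:
  forProvider:
    name: example-es
    environment: PRODUCTION
    networkIdRef:
      name: example-network    # hypothetical vpc Network managed resource
    config:
      - version: "7.17"        # illustrative version
        adminPasswordSecretRef:
          key: password        # key inside the Secret's data
          name: es-admin-password
          namespace: default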
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dataNode: + description: Configuration for Elasticsearch data nodes + subcluster. The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the Elasticsearch + data nodes subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Elasticsearch + hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + edition: + description: Edition of Elasticsearch. For more information, + see the official documentation. + type: string + masterNode: + description: Configuration for Elasticsearch master nodes + subcluster. The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the Elasticsearch + master nodes subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Elasticsearch + hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + plugins: + description: A set of Elasticsearch plugins to install. + items: + type: string + type: array + x-kubernetes-list-type: set + version: + description: Version of Elasticsearch. + type: string + required: + - adminPasswordSecretRef + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Elasticsearch cluster. + type: string + environment: + description: Deployment environment of the Elasticsearch cluster. + Can be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected.
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the Elasticsearch cluster. The structure + is documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Can be either true or false. + type: boolean + name: + description: User defined host name. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of the host to be deployed. Can be + either DATA_NODE or MASTER_NODE. + type: string + zone: + description: The availability zone where the Elasticsearch + host will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Elasticsearch + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Elasticsearch cluster. Provided by the + client when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Elasticsearch cluster + belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceAccountId: + description: ID of the service account authorized for this cluster. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
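Selectors complement direct references: instead of naming one object, they match by labels or by a shared controller reference, with the same resolution/resolve policy knobs. A sketch of a label-driven selector; the label key and value are illustrative:

spec:
  forProvider:
    securityGroupIdsSelector:
      matchControllerRef: true     # only SecurityGroups owned by the same composite
      matchLabels:
        team: data-platform        # illustrative label
      policy:
        resolution: Optional       # reconcile proceeds even if nothing matches
        resolve: Always            # re-evaluate the selector on every reconcile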
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
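These spec-level fields are shared by every managed resource: managementPolicies bounds what the controller may do, providerConfigRef selects the credentials, and writeConnectionSecretToRef mirrors connection details into a Secret. A sketch for observing an existing cluster without mutating it; the Secret name and namespace are hypothetical:

spec:
  managementPolicies:
    - Observe                      # import-only: never create, update, or delete
  providerConfigRef:
    name: default                  # ProviderConfig holding Yandex Cloud credentials
  writeConnectionSecretToRef:
    name: es-cluster-conn          # hypothetical target Secret
    namespace: crossplane-system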
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.config is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.config) + || (has(self.initProvider) && has(self.initProvider.config))' + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ElasticsearchClusterStatus defines the observed state of + ElasticsearchCluster. + properties: + atProvider: + properties: + config: + description: Configuration of the Elasticsearch cluster. The structure + is documented below. + items: + properties: + dataNode: + description: Configuration for Elasticsearch data nodes + subcluster. The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the Elasticsearch + data nodes subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Elasticsearch + hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + edition: + description: Edition of Elasticsearch. For more information, + see the official documentation. + type: string + masterNode: + description: Configuration for Elasticsearch master nodes + subcluster. The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the Elasticsearch + master nodes subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Elasticsearch + hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + plugins: + description: A set of Elasticsearch plugins to install. + items: + type: string + type: array + x-kubernetes-list-type: set + version: + description: Version of Elasticsearch. + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the cluster. + type: string + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Elasticsearch cluster. + type: string + environment: + description: Deployment environment of the Elasticsearch cluster. + Can be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + health: + description: Aggregated health of the cluster. Can be either ALIVE, + DEGRADED, DEAD or HEALTH_UNKNOWN.
For more information see health + field of JSON representation in the official documentation. + type: string + host: + description: A host of the Elasticsearch cluster. The structure + is documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Can be either true or false. + type: boolean + fqdn: + description: The fully qualified domain name of the host. + type: string + name: + description: User defined host name. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + type: + description: The type of the host to be deployed. Can be + either DATA_NODE or MASTER_NODE. + type: string + zone: + description: The availability zone where the Elasticsearch + host will be created. For more information see the official + documentation. + type: string + type: object + type: array + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Elasticsearch + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Elasticsearch cluster. Provided by the + client when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Elasticsearch cluster + belongs. + type: string + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: ID of the service account authorized for this cluster. + type: string + status: + description: Status of the cluster. Can be either CREATING, STARTING, + RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. + For more information see status field of JSON representation + in the official documentation. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
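In practice the conditions array is what the SYNCED and READY printer columns read from. An illustrative status excerpt for a healthy resource; timestamps and reasons are examples, not guaranteed values:

status:
  conditions:
    - type: Synced                                 # last reconcile succeeded
      status: "True"
      reason: ReconcileSuccess
      lastTransitionTime: "2024-01-01T00:00:00Z"   # illustrative timestamp
    - type: Ready                                  # external cluster is available
      status: "True"
      reason: Available
      lastTransitionTime: "2024-01-01T00:05:00Z"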
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_greenplumclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_greenplumclusters.yaml new file mode 100644 index 0000000..82f57bc --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_greenplumclusters.yaml @@ -0,0 +1,2093 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: greenplumclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: GreenplumCluster + listKind: GreenplumClusterList + plural: greenplumclusters + singular: greenplumcluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: GreenplumCluster is the Schema for the GreenplumClusters API. + Manages a Greenplum cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GreenplumClusterSpec defines the desired state of GreenplumCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + access: + description: Access policy to the Greenplum cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + webSql: + description: Allows access for SQL queries in the management + console. + type: boolean + yandexQuery: + description: Allow access for Yandex Query + type: boolean + type: object + type: array + assignPublicIp: + description: Sets whether the master hosts should get a public + IP address on creation. Changing this parameter for an existing + host is not supported at the moment. + type: boolean + backgroundActivities: + items: + properties: + analyzeAndVacuum: + description: Block to configure 'ANALYZE' and 'VACUUM' daily + operations. + items: + properties: + analyzeTimeout: + description: Maximum duration of the ANALYZE operation, + in seconds. The default value is 36000. As soon + as this period expires, the ANALYZE operation will + be forced to terminate. + type: number + startTime: + description: Time of day in 'HH:MM' format when scripts + should run. + type: string + vacuumTimeout: + description: Maximum duration of the VACUUM operation, + in seconds. The default value is 36000. As soon + as this period expires, the VACUUM operation will + be forced to terminate. + type: number + type: object + type: array + queryKillerIdle: + description: Block to configure script that kills long running + queries that are in idle state. + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + queryKillerIdleInTransaction: + description: block to configure script that kills long running + queries that are in idle in transaction state. + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + queryKillerLongRunning: + description: block to configure script that kills long running + queries (in any state). + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + type: object + type: array + backupWindowStart: + description: Time to start the daily backup, in the UTC timezone. + The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started (UTC). + type: number + minutes: + description: The minute at which backup will be started + (UTC). 
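Taken together, backupWindowStart and backgroundActivities let the daily backup and the maintenance scripts be scheduled away from peak hours. A sketch with illustrative timings:

spec:
  forProvider:
    backupWindowStart:
      - hours: 2                  # daily backup at 02:30 UTC
        minutes: 30
    backgroundActivities:
      - analyzeAndVacuum:
          - startTime: "03:30"    # run after the backup window
            analyzeTimeout: 7200  # abort ANALYZE after 2 hours
            vacuumTimeout: 7200   # abort VACUUM after 2 hours
        queryKillerLongRunning:
          - enable: true
            maxAge: 3600          # terminate queries running longer than an hour
            ignoreUsers:
              - etl_user          # illustrative exempt user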
+ type: number + type: object + type: array + cloudStorage: + description: Cloud Storage settings of the Greenplum cluster. + The structure is documented below. + items: + properties: + enable: + description: Whether to use cloud storage or not. + type: boolean + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Greenplum cluster. + type: string + environment: + description: Deployment environment of the Greenplum cluster. + (PRODUCTION, PRESTABLE) + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + greenplumConfig: + additionalProperties: + type: string + description: Greenplum cluster config. Detail info in "Greenplum + cluster settings" section (documented below). + type: object + x-kubernetes-map-type: granular + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Greenplum + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the Greenplum cluster. The + structure is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). 
Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 0 and 23. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + masterHostCount: + description: Number of hosts in master subcluster (1 or 2). + type: number + masterHostGroupIds: + description: A list of IDs of the host groups to place master + subclusters' VMs of the cluster on. + items: + type: string + type: array + x-kubernetes-list-type: set + masterSubcluster: + description: Settings for master subcluster. The structure is + documented below. + items: + properties: + resources: + description: Resources allocated to hosts for master subcluster + of the Greenplum cluster. The structure is documented + below. + items: + properties: + diskSize: + type: number + diskTypeId: + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + name: + description: Name of the Greenplum cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network that the Greenplum cluster + uses. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile.
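masterHostCount and masterSubcluster together size the master subcluster; two master hosts give a standby master. A sketch with illustrative resource values (the preset ID and disk figures are examples, not defaults):

spec:
  forProvider:
    masterHostCount: 2                  # second host acts as a standby master
    masterSubcluster:
      - resources:
          - resourcePresetId: s2.medium # illustrative preset
            diskTypeId: network-ssd
            diskSize: 100               # assumed to be gigabytes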
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + poolerConfig: + description: Configuration of the connection pooler. The structure + is documented below. + items: + properties: + poolClientIdleTimeout: + description: Value for pool_client_idle_timeout parameter + in Odyssey. + type: number + poolSize: + description: Value for pool_size parameter in Odyssey. + type: number + poolingMode: + description: Mode that the connection pooler is working + in. See descriptions of all modes in the [documentation + for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + type: string + type: object + type: array + pxfConfig: + description: Configuration of the PXF daemon. The structure is + documented below. + items: + properties: + connectionTimeout: + description: The Tomcat server connection timeout for read + operations in seconds. Value is between 5 and 600. + type: number + maxThreads: + description: The maximum number of PXF Tomcat threads. Value + is between 1 and 1024. + type: number + poolAllowCoreThreadTimeout: + description: Identifies whether or not core streaming threads + are allowed to time out. + type: boolean + poolCoreSize: + description: The number of core streaming threads. Value + is between 1 and 1024. + type: number + poolMaxSize: + description: The maximum allowed number of core streaming + threads. Value is between 1 and 1024. + type: number + poolQueueCapacity: + description: The capacity of the core streaming thread pool + queue. Value is positive. + type: number + uploadTimeout: + description: The Tomcat server connection timeout for write + operations in seconds. Value is between 5 and 600. + type: number + xms: + description: Initial JVM heap size for PXF daemon. Value + is between 64 and 16384. + type: number + xmx: + description: Maximum JVM heap size for PXF daemon. Value + is between 64 and 16384. + type: number + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected.
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + segmentHostCount: + description: Number of hosts in segment subcluster (from 1 to + 32). + type: number + segmentHostGroupIds: + description: A list of IDs of the host groups to place segment + subclusters' VMs of the cluster on. + items: + type: string + type: array + x-kubernetes-list-type: set + segmentInHost: + description: Number of segments on segment host (not more than + 1 + RAM/8). + type: number + segmentSubcluster: + description: Settings for segment subcluster. The structure is + documented below. + items: + properties: + resources: + description: Resources allocated to hosts for segment subcluster + of the Greenplum cluster. The structure is documented + below. + items: + properties: + diskSize: + type: number + diskTypeId: + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + subnetId: + description: The ID of the subnet to which the hosts belong. + The subnet must be a part of the network to which the cluster + belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userName: + description: Greenplum cluster admin user name. + type: string + userPasswordSecretRef: + description: Greenplum cluster admin user password. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + version: + description: Version of the Greenplum cluster. (6.22 or 6.25) + type: string + zone: + description: The availability zone where the Greenplum hosts will + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + access: + description: Access policy to the Greenplum cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + webSql: + description: Allows access for SQL queries in the management + console. + type: boolean + yandexQuery: + description: Allow access for Yandex Query + type: boolean + type: object + type: array + assignPublicIp: + description: Sets whether the master hosts should get a public + IP address on creation. Changing this parameter for an existing + host is not supported at the moment. + type: boolean + backgroundActivities: + items: + properties: + analyzeAndVacuum: + description: Block to configure 'ANALYZE' and 'VACUUM' daily + operations. + items: + properties: + analyzeTimeout: + description: Maximum duration of the ANALYZE operation, + in seconds. The default value is 36000. As soon + as this period expires, the ANALYZE operation will + be forced to terminate. + type: number + startTime: + description: Time of day in 'HH:MM' format when scripts + should run. + type: string + vacuumTimeout: + description: Maximum duration of the VACUUM operation, + in seconds. The default value is 36000. As soon + as this period expires, the VACUUM operation will + be forced to terminate. + type: number + type: object + type: array + queryKillerIdle: + description: Block to configure script that kills long running + queries that are in idle state. + items: + properties: + enable: + description: Flag that indicates whether script is + enabled.
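Pulling the preceding forProvider fields together, a minimal GreenplumCluster manifest might look like the sketch below. Every name, preset, and zone is illustrative, and the admin password again comes from a hypothetical Secret:

apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: GreenplumCluster
metadata:
  name: example-gp
spec:
  forProvider:
    name: example-gp
    environment: PRESTABLE
    version: "6.25"
    zone: ru-central1-a
    assignPublicIp: false
    masterHostCount: 2
    segmentHostCount: 2
    segmentInHost: 1
    masterSubcluster:
      - resources:
          - resourcePresetId: s2.medium   # illustrative preset
            diskTypeId: network-ssd
            diskSize: 100
    segmentSubcluster:
      - resources:
          - resourcePresetId: s2.medium   # illustrative preset
            diskTypeId: network-ssd
            diskSize: 100
    networkIdRef:
      name: example-network               # hypothetical vpc Network
    subnetIdRef:
      name: example-subnet                # hypothetical vpc Subnet
    userName: admin
    userPasswordSecretRef:
      key: password
      name: gp-admin-password             # hypothetical Secret
      namespace: default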
+ type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + queryKillerIdleInTransaction: + description: block to configure script that kills long running + queries that are in idle in transaction state. + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + queryKillerLongRunning: + description: block to configure script that kills long running + queries (in any state). + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + type: object + type: array + backupWindowStart: + description: Time to start the daily backup, in the UTC timezone. + The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started (UTC). + type: number + minutes: + description: The minute at which backup will be started + (UTC). + type: number + type: object + type: array + cloudStorage: + description: Cloud Storage settings of the Greenplum cluster. + The structure is documented below. + items: + properties: + enable: + description: Whether to use cloud storage or not. + type: boolean + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Greenplum cluster. + type: string + environment: + description: Deployment environment of the Greenplum cluster. + (PRODUCTION, PRESTABLE) + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
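initProvider carries create-only values: they are merged into forProvider when the resource is created and then added to Terraform's ignore_changes, so later drift on those fields is tolerated. A sketch that lets an external process resize the segment subcluster after creation; the values and label are illustrative:

spec:
  forProvider:
    name: example-gp
  initProvider:
    segmentHostCount: 2          # used at create time, then ignored on updates
    labels:
      created-by: crossplane     # illustrative create-only label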
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ greenplumConfig:
+ additionalProperties:
+ type: string
+ description: Greenplum cluster config. Detailed info in "Greenplum
+ cluster settings" section (documented below).
+ type: object
+ x-kubernetes-map-type: granular
+ labels:
+ additionalProperties:
+ type: string
+ description: A set of key/value label pairs to assign to the Greenplum
+ cluster.
+ type: object
+ x-kubernetes-map-type: granular
+ maintenanceWindow:
+ description: Maintenance policy of the Greenplum cluster. The
+ structure is documented below.
+ items:
+ properties:
+ day:
+ description: 'Day of the week (in DDD format). Allowed values:
+ "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"'
+ type: string
+ hour:
+ description: Hour of the day in UTC (in HH format). Allowed
+ value is between 0 and 23.
+ type: number
+ type:
+ description: Type of maintenance window. Can be either ANYTIME
+ or WEEKLY. A day and hour of window need to be specified
+ with weekly window.
+ type: string
+ type: object
+ type: array
+ masterHostCount:
+ description: Number of hosts in master subcluster (1 or 2).
+ type: number
+ masterHostGroupIds:
+ description: A list of IDs of the host groups to place master
+ subclusters' VMs of the cluster on.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ masterSubcluster:
+ description: Settings for master subcluster. The structure is
+ documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts for master subcluster
+ of the Greenplum cluster. The structure is documented
+ below.
+ items:
+ properties:
+ diskSize:
+ type: number
+ diskTypeId:
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ name:
+ description: Name of the Greenplum cluster. Provided by the client
+ when the cluster is created.
+ type: string
+ networkId:
+ description: ID of the network to which the Greenplum cluster
+ belongs.
+ type: string
+ networkIdRef:
+ description: Reference to a Network in vpc to populate networkId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved.
'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ networkIdSelector:
+ description: Selector for a Network in vpc to populate networkId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ poolerConfig:
+ description: Configuration of the connection pooler. The structure
+ is documented below.
+ items:
+ properties:
+ poolClientIdleTimeout:
+ description: Value for pool_client_idle_timeout parameter
+ in Odyssey.
+ type: number
+ poolSize:
+ description: Value for pool_size parameter in Odyssey.
+ type: number
+ poolingMode:
+ description: Mode that the connection pooler is working
+ in. See descriptions of all modes in the [documentation
+ for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string).
+ type: string
+ type: object
+ type: array
+ pxfConfig:
+ description: Configuration of the PXF daemon. The structure is
+ documented below.
+ items:
+ properties:
+ connectionTimeout:
+ description: The Tomcat server connection timeout for read
+ operations in seconds. Value is between 5 and 600.
+ type: number
+ maxThreads:
+ description: The maximum number of PXF Tomcat threads. Value
+ is between 1 and 1024.
+ type: number
+ poolAllowCoreThreadTimeout:
+ description: Identifies whether or not core streaming threads
+ are allowed to time out.
+ type: boolean
+ poolCoreSize:
+ description: The number of core streaming threads. Value
+ is between 1 and 1024.
+ type: number
+ poolMaxSize:
+ description: The maximum allowed number of core streaming
+ threads. Value is between 1 and 1024.
+ type: number
+ poolQueueCapacity:
+ description: The capacity of the core streaming thread pool
+ queue. Value is positive.
+ type: number
+ uploadTimeout:
+ description: The Tomcat server connection timeout for write
+ operations in seconds. Value is between 5 and 600.
+ type: number
+ xms:
+ description: Initial JVM heap size for PXF daemon. Value
+ is between 64 and 16384.
+ type: number
+ xmx:
+ description: Maximum JVM heap size for PXF daemon.
Value
+ is between 64 and 16384.
+ type: number
+ type: object
+ type: array
+ securityGroupIds:
+ description: A set of IDs of security groups assigned to hosts
+ of the cluster.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ securityGroupIdsRefs:
+ description: References to SecurityGroup in vpc to populate securityGroupIds.
+ items:
+ description: A Reference to a named object.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ securityGroupIdsSelector:
+ description: Selector for a list of SecurityGroup in vpc to populate
+ securityGroupIds.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ segmentHostCount:
+ description: Number of hosts in segment subcluster (from 1 to
+ 32).
+ type: number
+ segmentHostGroupIds:
+ description: A list of IDs of the host groups to place segment
+ subclusters' VMs of the cluster on.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ segmentInHost:
+ description: Number of segments on segment host (not more than
+ 1 + RAM/8).
+ type: number
+ segmentSubcluster:
+ description: Settings for segment subcluster. The structure is
+ documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts for segment subcluster
+ of the Greenplum cluster. The structure is documented
+ below.
+ items:
+ properties:
+ diskSize:
+ type: number
+ diskTypeId:
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ subnetId:
+ description: The ID of the subnet to which the hosts belong.
+ The subnet must be a part of the network to which the cluster
+ belongs.
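+ # NOTE: Illustrative sketch only, not emitted by upjet code generation. A
+ # minimal GreenplumCluster manifest satisfying this schema's CEL-validated
+ # required parameters might look as follows; every name, ID, preset, and
+ # secret below is a hypothetical placeholder:
+ #
+ #   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: GreenplumCluster
+ #   metadata:
+ #     name: example-greenplum
+ #   spec:
+ #     forProvider:
+ #       name: example-greenplum
+ #       environment: PRESTABLE
+ #       version: "6.25"
+ #       zone: ru-central1-a
+ #       assignPublicIp: false
+ #       masterHostCount: 2
+ #       segmentHostCount: 2
+ #       segmentInHost: 1
+ #       masterSubcluster:
+ #         - resources:
+ #             - resourcePresetId: s2.micro
+ #               diskTypeId: network-ssd
+ #               diskSize: 100
+ #       segmentSubcluster:
+ #         - resources:
+ #             - resourcePresetId: s2.micro
+ #               diskTypeId: network-ssd
+ #               diskSize: 100
+ #       userName: admin
+ #       userPasswordSecretRef:
+ #         name: greenplum-admin
+ #         key: password
+ #         namespace: default
+ #       networkIdRef:
+ #         name: example-network
+ #       subnetIdRef:
+ #         name: example-subnet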
+ type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userName: + description: Greenplum cluster admin user name. + type: string + userPasswordSecretRef: + description: Greenplum cluster admin password name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + version: + description: Version of the Greenplum cluster. (6.22 or 6.25) + type: string + zone: + description: The availability zone where the Greenplum hosts will + be created. + type: string + required: + - userPasswordSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.assignPublicIp is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.assignPublicIp) + || (has(self.initProvider) && has(self.initProvider.assignPublicIp))' + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.masterHostCount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.masterHostCount) + || (has(self.initProvider) && has(self.initProvider.masterHostCount))' + - message: spec.forProvider.masterSubcluster is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.masterSubcluster) + || (has(self.initProvider) && has(self.initProvider.masterSubcluster))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.segmentHostCount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.segmentHostCount) + || (has(self.initProvider) && has(self.initProvider.segmentHostCount))' + - message: spec.forProvider.segmentInHost is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.segmentInHost) + || (has(self.initProvider) && has(self.initProvider.segmentInHost))' + - message: 
spec.forProvider.segmentSubcluster is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.segmentSubcluster) + || (has(self.initProvider) && has(self.initProvider.segmentSubcluster))' + - message: spec.forProvider.userName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userName) + || (has(self.initProvider) && has(self.initProvider.userName))' + - message: spec.forProvider.userPasswordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userPasswordSecretRef)' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + - message: spec.forProvider.zone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.zone) + || (has(self.initProvider) && has(self.initProvider.zone))' + status: + description: GreenplumClusterStatus defines the observed state of GreenplumCluster. + properties: + atProvider: + properties: + access: + description: Access policy to the Greenplum cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + webSql: + description: Allows access for SQL queries in the management + console. + type: boolean + yandexQuery: + description: Allow access for Yandex Query + type: boolean + type: object + type: array + assignPublicIp: + description: Sets whether the master hosts should get a public + IP address on creation. Changing this parameter for an existing + host is not supported at the moment. + type: boolean + backgroundActivities: + items: + properties: + analyzeAndVacuum: + description: Block to configure 'ANALYZE' and 'VACUUM' daily + operations. + items: + properties: + analyzeTimeout: + description: Maximum duration of the ANALYZE operation, + in seconds. The default value is 36000. As soon + as this period expires, the ANALYZE operation will + be forced to terminate. + type: number + startTime: + description: Time of day in 'HH:MM' format when scripts + should run. + type: string + vacuumTimeout: + description: Maximum duration of the VACUUM operation, + in seconds. The default value is 36000. As soon + as this period expires, the VACUUM operation will + be forced to terminate. + type: number + type: object + type: array + queryKillerIdle: + description: Block to configure script that kills long running + queries that are in idle state. + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). 
+ type: number + type: object + type: array + queryKillerIdleInTransaction: + description: block to configure script that kills long running + queries that are in idle in transaction state. + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + queryKillerLongRunning: + description: block to configure script that kills long running + queries (in any state). + items: + properties: + enable: + description: Flag that indicates whether script is + enabled. + type: boolean + ignoreUsers: + description: List of users to ignore when considering + queries to terminate. + items: + type: string + type: array + maxAge: + description: Maximum duration for this type of queries + (in seconds). + type: number + type: object + type: array + type: object + type: array + backupWindowStart: + description: Time to start the daily backup, in the UTC timezone. + The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started (UTC). + type: number + minutes: + description: The minute at which backup will be started + (UTC). + type: number + type: object + type: array + cloudStorage: + description: Cloud Storage settings of the Greenplum cluster. + The structure is documented below. + items: + properties: + enable: + description: Whether to use cloud storage or not. + type: boolean + type: object + type: array + createdAt: + description: Creation timestamp of the cluster. + type: string + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Greenplum cluster. + type: string + environment: + description: Deployment environment of the Greenplum cluster. + (PRODUCTION, PRESTABLE) + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + greenplumConfig: + additionalProperties: + type: string + description: Greenplum cluster config. Detail info in "Greenplum + cluster settings" section (documented below). + type: object + x-kubernetes-map-type: granular + health: + description: Aggregated health of the cluster. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Greenplum + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the Greenplum cluster. The + structure is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 0 and 23. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + masterHostCount: + description: Number of hosts in master subcluster (1 or 2). + type: number + masterHostGroupIds: + description: A list of IDs of the host groups to place master + subclusters' VMs of the cluster on. 
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ masterHosts:
+ description: (Computed) Info about hosts in master subcluster.
+ The structure is documented below.
+ items:
+ properties:
+ assignPublicIp:
+ description: (Computed) Flag indicating that master hosts
+ should be created with a public IP address.
+ type: boolean
+ fqdn:
+ description: (Computed) The fully qualified domain name
+ of the host.
+ type: string
+ type: object
+ type: array
+ masterSubcluster:
+ description: Settings for master subcluster. The structure is
+ documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts for master subcluster
+ of the Greenplum cluster. The structure is documented
+ below.
+ items:
+ properties:
+ diskSize:
+ type: number
+ diskTypeId:
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ name:
+ description: Name of the Greenplum cluster. Provided by the client
+ when the cluster is created.
+ type: string
+ networkId:
+ description: ID of the network to which the Greenplum cluster
+ belongs.
+ type: string
+ poolerConfig:
+ description: Configuration of the connection pooler. The structure
+ is documented below.
+ items:
+ properties:
+ poolClientIdleTimeout:
+ description: Value for pool_client_idle_timeout parameter
+ in Odyssey.
+ type: number
+ poolSize:
+ description: Value for pool_size parameter in Odyssey.
+ type: number
+ poolingMode:
+ description: Mode that the connection pooler is working
+ in. See descriptions of all modes in the [documentation
+ for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string).
+ type: string
+ type: object
+ type: array
+ pxfConfig:
+ description: Configuration of the PXF daemon. The structure is
+ documented below.
+ items:
+ properties:
+ connectionTimeout:
+ description: The Tomcat server connection timeout for read
+ operations in seconds. Value is between 5 and 600.
+ type: number
+ maxThreads:
+ description: The maximum number of PXF Tomcat threads. Value
+ is between 1 and 1024.
+ type: number
+ poolAllowCoreThreadTimeout:
+ description: Identifies whether or not core streaming threads
+ are allowed to time out.
+ type: boolean
+ poolCoreSize:
+ description: The number of core streaming threads. Value
+ is between 1 and 1024.
+ type: number
+ poolMaxSize:
+ description: The maximum allowed number of core streaming
+ threads. Value is between 1 and 1024.
+ type: number
+ poolQueueCapacity:
+ description: The capacity of the core streaming thread pool
+ queue. Value is positive.
+ type: number
+ uploadTimeout:
+ description: The Tomcat server connection timeout for write
+ operations in seconds. Value is between 5 and 600.
+ type: number
+ xms:
+ description: Initial JVM heap size for PXF daemon. Value
+ is between 64 and 16384.
+ type: number
+ xmx:
+ description: Maximum JVM heap size for PXF daemon. Value
+ is between 64 and 16384.
+ type: number
+ type: object
+ type: array
+ securityGroupIds:
+ description: A set of IDs of security groups assigned to hosts
+ of the cluster.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ segmentHostCount:
+ description: Number of hosts in segment subcluster (from 1 to
+ 32).
+ type: number
+ segmentHostGroupIds:
+ description: A list of IDs of the host groups to place segment
+ subclusters' VMs of the cluster on.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ segmentHosts:
+ description: (Computed) Info about hosts in segment subcluster.
+ The structure is documented below.
+ items:
+ properties:
+ fqdn:
+ description: (Computed) The fully qualified domain name
+ of the host.
+ type: string
+ type: object
+ type: array
+ segmentInHost:
+ description: Number of segments on segment host (not more than
+ 1 + RAM/8).
+ type: number
+ segmentSubcluster:
+ description: Settings for segment subcluster. The structure is
+ documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts for segment subcluster
+ of the Greenplum cluster. The structure is documented
+ below.
+ items:
+ properties:
+ diskSize:
+ type: number
+ diskTypeId:
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ status:
+ description: Status of the cluster.
+ type: string
+ subnetId:
+ description: The ID of the subnet to which the hosts belong.
+ The subnet must be a part of the network to which the cluster
+ belongs.
+ type: string
+ userName:
+ description: Greenplum cluster admin user name.
+ type: string
+ version:
+ description: Version of the Greenplum cluster. (6.22 or 6.25)
+ type: string
+ zone:
+ description: The availability zone where the Greenplum hosts will
+ be created.
+ type: string
+ type: object
+ conditions:
+ description: Conditions of the resource.
+ items:
+ description: A Condition that may apply to a resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ LastTransitionTime is the last time this condition transitioned from one
+ status to another.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A Message containing details about this condition's last transition from
+ one status to another, if any.
+ type: string
+ observedGeneration:
+ description: |-
+ ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ type: integer
+ reason:
+ description: A Reason for this condition's last transition from
+ one status to another.
+ type: string
+ status:
+ description: Status of this condition; is it currently True,
+ False, or Unknown?
+ type: string
+ type:
+ description: |-
+ Type of this condition. At most one of each condition type may apply to
+ a resource at any point in time.
+ type: string
+ required:
+ - lastTransitionTime
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ observedGeneration:
+ description: |-
+ ObservedGeneration is the latest metadata.generation
+ which resulted in either a ready state, or stalled due to an error
+ it cannot recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkaclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkaclusters.yaml new file mode 100644 index 0000000..e6e4831 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkaclusters.yaml @@ -0,0 +1,1978 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: kafkaclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: KafkaCluster + listKind: KafkaClusterList + plural: kafkaclusters + singular: kafkacluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KafkaCluster is the Schema for the KafkaClusters API. Manages + a Kafka cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KafkaClusterSpec defines the desired state of KafkaCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + config: + description: Configuration of the Kafka cluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the Kafka cluster. The structure + is documented below. + items: + properties: + dataTransfer: + description: Allow access for DataTransfer + type: boolean + type: object + type: array + assignPublicIp: + description: Determines whether each broker will be assigned + a public IP address. The default is false. + type: boolean + brokersCount: + description: Count of brokers per availability zone. The + default is 1. 
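+ # NOTE: Illustrative sketch only, not emitted by upjet code generation. A
+ # minimal KafkaCluster manifest against this schema could look like the
+ # following; all names, IDs, and the version/preset values are hypothetical
+ # placeholders:
+ #
+ #   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: KafkaCluster
+ #   metadata:
+ #     name: example-kafka
+ #   spec:
+ #     forProvider:
+ #       name: example-kafka
+ #       networkIdRef:
+ #         name: example-network
+ #       subnetIdsRefs:
+ #         - name: example-subnet
+ #       config:
+ #         - version: "3.5"
+ #           zones:
+ #             - ru-central1-a
+ #           brokersCount: 1
+ #           assignPublicIp: false
+ #           kafka:
+ #             - resources:
+ #                 - resourcePresetId: s2.micro
+ #                   diskTypeId: network-ssd
+ #                   diskSize: 32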
+ type: number
+ diskSizeAutoscaling:
+ description: Disk autoscaling settings of the Kafka cluster.
+ The structure is documented below.
+ items:
+ properties:
+ diskSizeLimit:
+ description: Maximum possible size of disk in bytes.
+ type: number
+ emergencyUsageThreshold:
+ description: Percent of disk utilization. Disk will
+ autoscale immediately if this threshold is reached.
+ Value is between 0 and 100. Default value is 0 (autoscaling
+ disabled). Must not be less than the 'planned_usage_threshold'
+ value.
+ type: number
+ plannedUsageThreshold:
+ description: Percent of disk utilization. During maintenance
+ the disk will autoscale if this threshold is reached.
+ Value is between 0 and 100. Default value is 0 (autoscaling
+ disabled).
+ type: number
+ type: object
+ type: array
+ kafka:
+ description: Configuration of the Kafka subcluster. The
+ structure is documented below.
+ items:
+ properties:
+ kafkaConfig:
+ description: User-defined settings for the Kafka cluster.
+ The structure is documented below.
+ items:
+ properties:
+ autoCreateTopicsEnable:
+ type: boolean
+ compressionType:
+ description: Kafka topic settings. For more
+ information, see the official documentation
+ and the Kafka documentation.
+ type: string
+ defaultReplicationFactor:
+ type: string
+ logFlushIntervalMessages:
+ type: string
+ logFlushIntervalMs:
+ type: string
+ logFlushSchedulerIntervalMs:
+ type: string
+ logPreallocate:
+ type: boolean
+ logRetentionBytes:
+ type: string
+ logRetentionHours:
+ type: string
+ logRetentionMinutes:
+ type: string
+ logRetentionMs:
+ type: string
+ logSegmentBytes:
+ type: string
+ messageMaxBytes:
+ type: string
+ numPartitions:
+ type: string
+ offsetsRetentionMinutes:
+ type: string
+ replicaFetchMaxBytes:
+ type: string
+ saslEnabledMechanisms:
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ socketReceiveBufferBytes:
+ type: string
+ socketSendBufferBytes:
+ type: string
+ sslCipherSuites:
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type: object
+ type: array
+ resources:
+ description: Resources allocated to hosts of the Kafka
+ subcluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available
+ to a Kafka host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of Kafka
+ hosts. For more information see the official
+ documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ schemaRegistry:
+ description: Enables managed schema registry on the cluster.
+ The default is false.
+ type: boolean
+ unmanagedTopics:
+ type: boolean
+ version:
+ description: Version of the Kafka server software.
+ type: string
+ zones:
+ description: List of availability zones.
+ items:
+ type: string
+ type: array
+ zookeeper:
+ description: Configuration of the ZooKeeper subcluster.
+ The structure is documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts of the ZooKeeper
+ subcluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available
+ to a ZooKeeper host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of ZooKeeper
+ hosts. For more information see the official
+ documentation.
+ type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Kafka cluster. + type: string + environment: + description: Deployment environment of the Kafka cluster. Can + be either PRESTABLE or PRODUCTION. The default is PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostGroupIds: + description: A list of IDs of the host groups to place VMs of + the cluster on. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Kafka + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the Kafka cluster. The structure + is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 1 and 24. 
+ type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Kafka cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Kafka cluster belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Security group ids, to which the Kafka cluster belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: IDs of the subnets, to which the Kafka cluster belongs. + items: + type: string + type: array + subnetIdsRefs: + description: References to Subnet in vpc to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + topic: + description: (Deprecated) To manage topics, please switch to using + a separate resource type yandex_mdb_kafka_topic. + items: + properties: + name: + description: The name of the topic. + type: string + partitions: + description: The number of the topic's partitions. + type: number + replicationFactor: + description: Amount of data copies (replicas) for the topic + in the cluster. + type: number + topicConfig: + description: User-defined settings for the topic. The structure + is documented below. + items: + properties: + cleanupPolicy: + type: string + compressionType: + description: Kafka topic settings. For more information, + see the official documentation and the Kafka documentation. + type: string + deleteRetentionMs: + type: string + fileDeleteDelayMs: + type: string + flushMessages: + type: string + flushMs: + type: string + maxMessageBytes: + type: string + minCompactionLagMs: + type: string + minInsyncReplicas: + type: string + preallocate: + type: boolean + retentionBytes: + type: string + retentionMs: + type: string + segmentBytes: + type: string + type: object + type: array + type: object + type: array + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_kafka_user. + items: + properties: + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + allowHosts: + description: Set of hosts, to which this permission + grants access to. + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role type to grant to the topic. + type: string + topicName: + description: The name of the topic that the permission + grants access to. + type: string + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + config: + description: Configuration of the Kafka cluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the Kafka cluster. The structure + is documented below. 
+ items:
+ properties:
+ dataTransfer:
+ description: Allow access for DataTransfer
+ type: boolean
+ type: object
+ type: array
+ assignPublicIp:
+ description: Determines whether each broker will be assigned
+ a public IP address. The default is false.
+ type: boolean
+ brokersCount:
+ description: Count of brokers per availability zone. The
+ default is 1.
+ type: number
+ diskSizeAutoscaling:
+ description: Disk autoscaling settings of the Kafka cluster.
+ The structure is documented below.
+ items:
+ properties:
+ diskSizeLimit:
+ description: Maximum possible size of disk in bytes.
+ type: number
+ emergencyUsageThreshold:
+ description: Percent of disk utilization. Disk will
+ autoscale immediately if this threshold is reached.
+ Value is between 0 and 100. Default value is 0 (autoscaling
+ disabled). Must not be less than the 'planned_usage_threshold'
+ value.
+ type: number
+ plannedUsageThreshold:
+ description: Percent of disk utilization. During maintenance
+ the disk will autoscale if this threshold is reached.
+ Value is between 0 and 100. Default value is 0 (autoscaling
+ disabled).
+ type: number
+ type: object
+ type: array
+ kafka:
+ description: Configuration of the Kafka subcluster. The
+ structure is documented below.
+ items:
+ properties:
+ kafkaConfig:
+ description: User-defined settings for the Kafka cluster.
+ The structure is documented below.
+ items:
+ properties:
+ autoCreateTopicsEnable:
+ type: boolean
+ compressionType:
+ description: Kafka topic settings. For more
+ information, see the official documentation
+ and the Kafka documentation.
+ type: string
+ defaultReplicationFactor:
+ type: string
+ logFlushIntervalMessages:
+ type: string
+ logFlushIntervalMs:
+ type: string
+ logFlushSchedulerIntervalMs:
+ type: string
+ logPreallocate:
+ type: boolean
+ logRetentionBytes:
+ type: string
+ logRetentionHours:
+ type: string
+ logRetentionMinutes:
+ type: string
+ logRetentionMs:
+ type: string
+ logSegmentBytes:
+ type: string
+ messageMaxBytes:
+ type: string
+ numPartitions:
+ type: string
+ offsetsRetentionMinutes:
+ type: string
+ replicaFetchMaxBytes:
+ type: string
+ saslEnabledMechanisms:
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ socketReceiveBufferBytes:
+ type: string
+ socketSendBufferBytes:
+ type: string
+ sslCipherSuites:
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type: object
+ type: array
+ resources:
+ description: Resources allocated to hosts of the Kafka
+ subcluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available
+ to a Kafka host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of Kafka
+ hosts. For more information see the official
+ documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ schemaRegistry:
+ description: Enables managed schema registry on the cluster.
+ The default is false.
+ type: boolean
+ unmanagedTopics:
+ type: boolean
+ version:
+ description: Version of the Kafka server software.
+ type: string
+ zones:
+ description: List of availability zones.
+ items:
+ type: string
+ type: array
+ zookeeper:
+ description: Configuration of the ZooKeeper subcluster.
+ The structure is documented below.
+ items:
+ properties:
+ resources:
+ description: Resources allocated to hosts of the ZooKeeper
+ subcluster. The structure is documented below.
+ items: + properties: + diskSize: + description: Volume of the storage available + to a ZooKeeper host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of ZooKeeper + hosts. For more information see the official + documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Kafka cluster. + type: string + environment: + description: Deployment environment of the Kafka cluster. Can + be either PRESTABLE or PRODUCTION. The default is PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostGroupIds: + description: A list of IDs of the host groups to place VMs of + the cluster on. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Kafka + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the Kafka cluster. The structure + is documented below. 
+ items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 1 and 24. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of the window must be specified + for a weekly window. + type: string + type: object + type: array + name: + description: Name of the Kafka cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Kafka cluster belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Security group IDs, to which the Kafka cluster belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: IDs of the subnets, to which the Kafka cluster belongs. + items: + type: string + type: array + subnetIdsRefs: + description: References to Subnet in vpc to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + topic: + description: (Deprecated) To manage topics, please switch to using + a separate resource type yandex_mdb_kafka_topic. + items: + properties: + name: + description: The name of the topic. + type: string + partitions: + description: The number of the topic's partitions. + type: number + replicationFactor: + description: The number of data copies (replicas) of the topic + in the cluster. + type: number + topicConfig: + description: User-defined settings for the topic. The structure + is documented below. + items: + properties: + cleanupPolicy: + type: string + compressionType: + description: Kafka topic settings. For more information, + see the official documentation and the Kafka documentation. + type: string + deleteRetentionMs: + type: string + fileDeleteDelayMs: + type: string + flushMessages: + type: string + flushMs: + type: string + maxMessageBytes: + type: string + minCompactionLagMs: + type: string + minInsyncReplicas: + type: string + preallocate: + type: boolean + retentionBytes: + type: string + retentionMs: + type: string + segmentBytes: + type: string + type: object + type: array + type: object + type: array + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_kafka_user. + items: + properties: + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + allowHosts: + description: Set of hosts to which this permission + grants access. + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role type to grant to the topic. + type: string + topicName: + description: The name of the topic that the permission + grants access to. + type: string + type: object + type: array + required: + - passwordSecretRef + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to the Secret Store implementation for other store types.
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to the Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.config is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.config) + || (has(self.initProvider) && has(self.initProvider.config))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: KafkaClusterStatus defines the observed state of KafkaCluster. + properties: + atProvider: + properties: + config: + description: Configuration of the Kafka cluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the Kafka cluster. The structure + is documented below. + items: + properties: + dataTransfer: + description: Allow access for DataTransfer + type: boolean + type: object + type: array + assignPublicIp: + description: Determines whether each broker will be assigned + a public IP address. The default is false. + type: boolean + brokersCount: + description: Count of brokers per availability zone. The + default is 1. + type: number + diskSizeAutoscaling: + description: Disk autoscaling settings of the Kafka cluster. + The structure is documented below. + items: + properties: + diskSizeLimit: + description: Maximum possible size of disk in bytes. + type: number + emergencyUsageThreshold: + description: Percent of disk utilization. The disk will + autoscale immediately if this threshold is reached. + Value is between 0 and 100. Default value is 0 (autoscaling + disabled). Must not be less than the 'planned_usage_threshold' + value. + type: number + plannedUsageThreshold: + description: Percent of disk utilization. During maintenance + the disk will autoscale if this threshold is reached. + Value is between 0 and 100. Default value is 0 (autoscaling + disabled). + type: number + type: object + type: array + kafka: + description: Configuration of the Kafka subcluster. The + structure is documented below.
+ items: + properties: + kafkaConfig: + description: User-defined settings for the Kafka cluster. + The structure is documented below. + items: + properties: + autoCreateTopicsEnable: + type: boolean + compressionType: + description: Kafka topic settings. For more + information, see the official documentation + and the Kafka documentation. + type: string + defaultReplicationFactor: + type: string + logFlushIntervalMessages: + type: string + logFlushIntervalMs: + type: string + logFlushSchedulerIntervalMs: + type: string + logPreallocate: + type: boolean + logRetentionBytes: + type: string + logRetentionHours: + type: string + logRetentionMinutes: + type: string + logRetentionMs: + type: string + logSegmentBytes: + type: string + messageMaxBytes: + type: string + numPartitions: + type: string + offsetsRetentionMinutes: + type: string + replicaFetchMaxBytes: + type: string + saslEnabledMechanisms: + items: + type: string + type: array + x-kubernetes-list-type: set + socketReceiveBufferBytes: + type: string + socketSendBufferBytes: + type: string + sslCipherSuites: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + resources: + description: Resources allocated to hosts of the Kafka + subcluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available + to a Kafka host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Kafka + hosts. For more information see the official + documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + schemaRegistry: + description: Enables managed schema registry on the cluster. + The default is false. + type: boolean + unmanagedTopics: + type: boolean + version: + description: Version of the Kafka server software. + type: string + zones: + description: List of availability zones. + items: + type: string + type: array + zookeeper: + description: Configuration of the ZooKeeper subcluster. + The structure is documented below. + items: + properties: + resources: + description: Resources allocated to hosts of the ZooKeeper + subcluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available + to a ZooKeeper host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of ZooKeeper + hosts. For more information see the official + documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + createdAt: + description: Timestamp of cluster creation. + type: string + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Kafka cluster. + type: string + environment: + description: Deployment environment of the Kafka cluster. Can + be either PRESTABLE or PRODUCTION. The default is PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + health: + description: Aggregated health of the cluster. Can be either ALIVE, + DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health + field of JSON representation in the official documentation. + type: string + host: + description: A host of the Kafka cluster. The structure is documented + below.
+ items: + properties: + assignPublicIp: + description: The flag that defines whether a public IP address + is assigned to the node. + type: boolean + health: + description: Health of the host. + type: string + name: + description: The fully qualified domain name of the host. + type: string + role: + description: Role of the host in the cluster. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + type: string + zoneId: + description: The availability zone where the Kafka host + was created. + type: string + type: object + type: array + hostGroupIds: + description: A list of IDs of the host groups to place VMs of + the cluster on. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Kafka + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the Kafka cluster. The structure + is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 1 and 24. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of the window must be specified + for a weekly window. + type: string + type: object + type: array + name: + description: Name of the Kafka cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Kafka cluster belongs. + type: string + securityGroupIds: + description: Security group IDs, to which the Kafka cluster belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + status: + description: Status of the cluster. Can be either CREATING, STARTING, + RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. + For more information see status field of JSON representation + in the official documentation. + type: string + subnetIds: + description: IDs of the subnets, to which the Kafka cluster belongs. + items: + type: string + type: array + topic: + description: (Deprecated) To manage topics, please switch to using + a separate resource type yandex_mdb_kafka_topic. + items: + properties: + name: + description: The name of the topic. + type: string + partitions: + description: The number of the topic's partitions. + type: number + replicationFactor: + description: The number of data copies (replicas) of the topic + in the cluster. + type: number + topicConfig: + description: User-defined settings for the topic. The structure + is documented below. + items: + properties: + cleanupPolicy: + type: string + compressionType: + description: Kafka topic settings. For more information, + see the official documentation and the Kafka documentation.
+ type: string + deleteRetentionMs: + type: string + fileDeleteDelayMs: + type: string + flushMessages: + type: string + flushMs: + type: string + maxMessageBytes: + type: string + minCompactionLagMs: + type: string + minInsyncReplicas: + type: string + preallocate: + type: boolean + retentionBytes: + type: string + retentionMs: + type: string + segmentBytes: + type: string + type: object + type: array + type: object + type: array + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_kafka_user. + items: + properties: + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + allowHosts: + description: Set of hosts to which this permission + grants access. + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role type to grant to the topic. + type: string + topicName: + description: The name of the topic that the permission + grants access to. + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
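For orientation between the generated files: a minimal KafkaCluster manifest that satisfies the schema above, including the spec.forProvider.config and spec.forProvider.name CEL validations, might look like the following sketch. All object names and reference targets are hypothetical placeholders, and the Kafka version, zone, disk type, and resource preset are assumed typical Yandex Cloud values rather than values taken from this changeset:

apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: KafkaCluster
metadata:
  name: example-kafka                  # hypothetical name
spec:
  forProvider:
    name: example-kafka                # required by the CEL rule above
    networkIdRef:
      name: example-network            # resolves networkId from a vpc Network object
    subnetIdsRefs:
      - name: example-subnet-a         # resolves subnetIds from vpc Subnet objects
    config:                            # required by the CEL rule above
      - version: "3.5"                 # assumed Kafka version
        zones:
          - ru-central1-a
        brokersCount: 1                # the default is 1
        assignPublicIp: false          # the default is false
        kafka:
          - resources:
              - resourcePresetId: s2.micro   # assumed preset
                diskTypeId: network-ssd      # assumed disk type
                diskSize: 32                 # in gigabytes
  providerConfigRef:
    name: default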
diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkaconnectors.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkaconnectors.yaml new file mode 100644 index 0000000..3637311 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkaconnectors.yaml @@ -0,0 +1,1034 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: kafkaconnectors.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: KafkaConnector + listKind: KafkaConnectorList + plural: kafkaconnectors + singular: kafkaconnector + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KafkaConnector is the Schema for the KafkaConnectors API. Manages + a connector of a Kafka cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KafkaConnectorSpec defines the desired state of KafkaConnector + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a KafkaCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved.
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a KafkaCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectorConfigMirrormaker: + description: Params for MirrorMaker2 connector. The structure + is documented below. + items: + properties: + replicationFactor: + description: Replication factor for topics created in the target + cluster + type: number + sourceCluster: + description: Settings for the source cluster. The structure + is documented below. + items: + properties: + alias: + description: Name of the cluster. Also used as a topic + prefix + type: string + externalCluster: + description: Connection params for the external cluster + items: + properties: + bootstrapServers: + description: List of bootstrap servers to connect + to the cluster + type: string + saslMechanism: + description: Type of SASL authentication mechanism + to use + type: string + saslPasswordSecretRef: + description: Password to use in the SASL authentication + mechanism + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + saslUsername: + description: Username to use in the SASL authentication + mechanism + type: string + securityProtocol: + description: Security protocol to use + type: string + type: object + type: array + thisCluster: + description: Using this section in the cluster definition + (source or target) means it refers to this cluster + items: + type: object + type: array + type: object + type: array + targetCluster: + description: Settings for the target cluster. The structure + is documented below. + items: + properties: + alias: + description: Name of the cluster.
Also used as a topic + prefix + type: string + externalCluster: + description: Connection params for the external cluster + items: + properties: + bootstrapServers: + description: List of bootstrap servers to connect + to the cluster + type: string + saslMechanism: + description: Type of SASL authentication mechanism + to use + type: string + saslPasswordSecretRef: + description: Password to use in the SASL authentication + mechanism + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + saslUsername: + description: Username to use in the SASL authentication + mechanism + type: string + securityProtocol: + description: Security protocol to use + type: string + type: object + type: array + thisCluster: + description: Using this section in the cluster definition + (source or target) means it refers to this cluster + items: + type: object + type: array + type: object + type: array + topics: + description: The pattern for topic names to be replicated. + type: string + type: object + type: array + connectorConfigS3Sink: + description: Params for S3 Sink connector. The structure is documented + below. + items: + properties: + fileCompressionType: + description: Compression type for messages. Cannot be changed. + type: string + fileMaxRecords: + description: Max records per file. + type: number + s3Connection: + description: Settings for connection to s3-compatible storage. + The structure is documented below. + items: + properties: + bucketName: + description: Name of the bucket in s3-compatible storage. + type: string + externalS3: + description: Connection params for external s3-compatible + storage. The structure is documented below. + items: + properties: + accessKeyId: + description: ID of aws-compatible static key. + type: string + endpoint: + description: URL of s3-compatible storage. + type: string + region: + description: Region of s3-compatible storage. + See the list of available regions. + type: string + secretAccessKeySecretRef: + description: Secret key of aws-compatible static + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: array + topics: + description: The pattern for topic names to be copied to + the s3 bucket. + type: string + type: object + type: array + name: + description: The name of the connector. + type: string + properties: + additionalProperties: + type: string + description: Additional properties for connector. + type: object + x-kubernetes-map-type: granular + tasksMax: + description: The number of the connector's parallel working tasks. + The default is the number of brokers. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation.
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a KafkaCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a KafkaCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectorConfigMirrormaker: + description: Params for MirrorMaker2 connector. The structure + is documented below. + items: + properties: + replicationFactor: + description: Replication factor for topics created in the target + cluster + type: number + sourceCluster: + description: Settings for the source cluster. The structure + is documented below. + items: + properties: + alias: + description: Name of the cluster. Also used as a topic + prefix + type: string + externalCluster: + description: Connection params for the external cluster + items: + properties: + bootstrapServers: + description: List of bootstrap servers to connect + to the cluster + type: string + saslMechanism: + description: Type of SASL authentication mechanism + to use + type: string + saslPasswordSecretRef: + description: Password to use in the SASL authentication + mechanism + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret.
+ type: string + required: + - key + - name + - namespace + type: object + saslUsername: + description: Username to use in the SASL authentication + mechanism + type: string + securityProtocol: + description: Security protocol to use + type: string + type: object + type: array + thisCluster: + description: Using this section in the cluster definition + (source or target) means it refers to this cluster + items: + type: object + type: array + type: object + type: array + targetCluster: + description: Settings for the target cluster. The structure + is documented below. + items: + properties: + alias: + description: Name of the cluster. Also used as a topic + prefix + type: string + externalCluster: + description: Connection params for the external cluster + items: + properties: + bootstrapServers: + description: List of bootstrap servers to connect + to the cluster + type: string + saslMechanism: + description: Type of SASL authentication mechanism + to use + type: string + saslPasswordSecretRef: + description: Password to use in the SASL authentication + mechanism + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + saslUsername: + description: Username to use in the SASL authentication + mechanism + type: string + securityProtocol: + description: Security protocol to use + type: string + type: object + type: array + thisCluster: + description: Using this section in the cluster definition + (source or target) means it refers to this cluster + items: + type: object + type: array + type: object + type: array + topics: + description: The pattern for topic names to be replicated. + type: string + type: object + type: array + connectorConfigS3Sink: + description: Params for S3 Sink connector. The structure is documented + below. + items: + properties: + fileCompressionType: + description: Compression type for messages. Cannot be changed. + type: string + fileMaxRecords: + description: Max records per file. + type: number + s3Connection: + description: Settings for connection to s3-compatible storage. + The structure is documented below. + items: + properties: + bucketName: + description: Name of the bucket in s3-compatible storage. + type: string + externalS3: + description: Connection params for external s3-compatible + storage. The structure is documented below. + items: + properties: + accessKeyId: + description: ID of aws-compatible static key. + type: string + endpoint: + description: URL of s3-compatible storage. + type: string + region: + description: Region of s3-compatible storage. + See the list of available regions. + type: string + secretAccessKeySecretRef: + description: Secret key of aws-compatible static + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: array + topics: + description: The pattern for topic names to be copied to + the s3 bucket. + type: string + type: object + type: array + name: + description: The name of the connector. + type: string + properties: + additionalProperties: + type: string + description: Additional properties for connector.
+ type: object + x-kubernetes-map-type: granular + tasksMax: + description: The number of the connector's parallel working tasks. + The default is the number of brokers. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to the Secret Store implementation for other store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to the Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: KafkaConnectorStatus defines the observed state of KafkaConnector. + properties: + atProvider: + properties: + clusterId: + type: string + connectorConfigMirrormaker: + description: Params for MirrorMaker2 connector. The structure + is documented below. + items: + properties: + replicationFactor: + description: Replication factor for topics created in the target + cluster + type: number + sourceCluster: + description: Settings for the source cluster. The structure + is documented below. + items: + properties: + alias: + description: Name of the cluster.
Also used as a topic + prefix + type: string + externalCluster: + description: Connection params for the external cluster + items: + properties: + bootstrapServers: + description: List of bootstrap servers to connect + to the cluster + type: string + saslMechanism: + description: Type of SASL authentication mechanism + to use + type: string + saslUsername: + description: Username to use in the SASL authentication + mechanism + type: string + securityProtocol: + description: Security protocol to use + type: string + type: object + type: array + thisCluster: + description: Using this section in the cluster definition + (source or target) means it refers to this cluster + items: + type: object + type: array + type: object + type: array + targetCluster: + description: Settings for the target cluster. The structure + is documented below. + items: + properties: + alias: + description: Name of the cluster. Also used as a topic + prefix + type: string + externalCluster: + description: Connection params for the external cluster + items: + properties: + bootstrapServers: + description: List of bootstrap servers to connect + to the cluster + type: string + saslMechanism: + description: Type of SASL authentication mechanism + to use + type: string + saslUsername: + description: Username to use in the SASL authentication + mechanism + type: string + securityProtocol: + description: Security protocol to use + type: string + type: object + type: array + thisCluster: + description: Using this section in the cluster definition + (source or target) means it refers to this cluster + items: + type: object + type: array + type: object + type: array + topics: + description: The pattern for topic names to be replicated. + type: string + type: object + type: array + connectorConfigS3Sink: + description: Params for S3 Sink connector. The structure is documented + below. + items: + properties: + fileCompressionType: + description: Compression type for messages. Cannot be changed. + type: string + fileMaxRecords: + description: Max records per file. + type: number + s3Connection: + description: Settings for connection to s3-compatible storage. + The structure is documented below. + items: + properties: + bucketName: + description: Name of the bucket in s3-compatible storage. + type: string + externalS3: + description: Connection params for external s3-compatible + storage. The structure is documented below. + items: + properties: + accessKeyId: + description: ID of aws-compatible static key. + type: string + endpoint: + description: URL of s3-compatible storage. + type: string + region: + description: Region of s3-compatible storage. + See the list of available regions. + type: string + type: object + type: array + type: object + type: array + topics: + description: The pattern for topic names to be copied to + the s3 bucket. + type: string + type: object + type: array + id: + type: string + name: + description: The name of the connector. + type: string + properties: + additionalProperties: + type: string + description: Additional properties for connector. + type: object + x-kubernetes-map-type: granular + tasksMax: + description: The number of the connector's parallel working tasks. + The default is the number of brokers. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
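Similarly, a KafkaConnector manifest for the MirrorMaker2 configuration above might look like this sketch. The cluster reference, credentials Secret, broker addresses, and topic pattern are hypothetical, and the SASL mechanism and security protocol are assumed values, not taken from this changeset:

apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: KafkaConnector
metadata:
  name: example-mirrormaker            # hypothetical name
spec:
  forProvider:
    name: mm2-replication              # required by the CEL rule above
    clusterIdRef:
      name: example-kafka              # resolves clusterId from a KafkaCluster object
    tasksMax: 3                        # defaults to the number of brokers when omitted
    connectorConfigMirrormaker:
      - topics: "orders.*"             # pattern of topic names to replicate
        replicationFactor: 1
        sourceCluster:
          - alias: source
            externalCluster:
              - bootstrapServers: "broker-1.example:9091,broker-2.example:9091"
                saslUsername: replicator
                saslMechanism: SCRAM-SHA-512   # assumed mechanism
                securityProtocol: SASL_SSL     # assumed protocol
                saslPasswordSecretRef:         # password is read from a Secret
                  name: source-kafka-credentials
                  key: password
                  namespace: default
        targetCluster:
          - alias: target
            thisCluster:
              - {}                     # marks the managed cluster as the target
  providerConfigRef:
    name: default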
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkatopics.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkatopics.yaml new file mode 100644 index 0000000..e2a561f --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkatopics.yaml @@ -0,0 +1,628 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: kafkatopics.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: KafkaTopic + listKind: KafkaTopicList + plural: kafkatopics + singular: kafkatopic + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KafkaTopic is the Schema for the KafkaTopics API. Manages a topic + of a Kafka cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KafkaTopicSpec defines the desired state of KafkaTopic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a KafkaCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a KafkaCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the topic. + type: string + partitions: + description: The number of the topic's partitions. + type: number + replicationFactor: + description: Amount of data copies (replicas) for the topic in + the cluster. + type: number + topicConfig: + description: User-defined settings for the topic. The structure + is documented below. 
+ items: + properties: + cleanupPolicy: + description: Kafka topic settings. For more information, + see the official documentation and the Kafka documentation. + type: string + compressionType: + type: string + deleteRetentionMs: + type: string + fileDeleteDelayMs: + type: string + flushMessages: + type: string + flushMs: + type: string + maxMessageBytes: + type: string + minCompactionLagMs: + type: string + minInsyncReplicas: + type: string + preallocate: + type: boolean + retentionBytes: + type: string + retentionMs: + type: string + segmentBytes: + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a KafkaCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a KafkaCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the topic. 
+ type: string + partitions: + description: The number of the topic's partitions. + type: number + replicationFactor: + description: Amount of data copies (replicas) for the topic in + the cluster. + type: number + topicConfig: + description: User-defined settings for the topic. The structure + is documented below. + items: + properties: + cleanupPolicy: + description: Kafka topic settings. For more information, + see the official documentation and the Kafka documentation. + type: string + compressionType: + type: string + deleteRetentionMs: + type: string + fileDeleteDelayMs: + type: string + flushMessages: + type: string + flushMs: + type: string + maxMessageBytes: + type: string + minCompactionLagMs: + type: string + minInsyncReplicas: + type: string + preallocate: + type: boolean + retentionBytes: + type: string + retentionMs: + type: string + segmentBytes: + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.partitions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.partitions) + || (has(self.initProvider) && has(self.initProvider.partitions))' + - message: spec.forProvider.replicationFactor is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.replicationFactor) + || (has(self.initProvider) && has(self.initProvider.replicationFactor))' + status: + description: KafkaTopicStatus defines the observed state of KafkaTopic. + properties: + atProvider: + properties: + clusterId: + type: string + id: + type: string + name: + description: The name of the topic. + type: string + partitions: + description: The number of the topic's partitions. + type: number + replicationFactor: + description: Amount of data copies (replicas) for the topic in + the cluster. + type: number + topicConfig: + description: User-defined settings for the topic. The structure + is documented below. + items: + properties: + cleanupPolicy: + description: Kafka topic settings. For more information, + see the official documentation and the Kafka documentation. + type: string + compressionType: + type: string + deleteRetentionMs: + type: string + fileDeleteDelayMs: + type: string + flushMessages: + type: string + flushMs: + type: string + maxMessageBytes: + type: string + minCompactionLagMs: + type: string + minInsyncReplicas: + type: string + preallocate: + type: boolean + retentionBytes: + type: string + retentionMs: + type: string + segmentBytes: + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
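The CEL rules above make name, partitions, and replicationFactor effectively required whenever the management policies include Create or Update. A minimal manifest that satisfies them, with hypothetical object names, could look like this; note that most topicConfig values are typed as strings in the schema, and the cleanupPolicy constant should be verified against the provider docs:

  apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
  kind: KafkaTopic
  metadata:
    name: events
  spec:
    forProvider:
      clusterIdRef:
        name: my-kafka-cluster                   # hypothetical KafkaCluster object
      name: events
      partitions: 6
      replicationFactor: 3
      topicConfig:
        - cleanupPolicy: CLEANUP_POLICY_DELETE   # assumed constant; verify
          retentionMs: "604800000"               # 7 days, passed as a string
          minInsyncReplicas: "2"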
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkausers.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkausers.yaml new file mode 100644 index 0000000..88da282 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_kafkausers.yaml @@ -0,0 +1,596 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: kafkausers.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: KafkaUser + listKind: KafkaUserList + plural: kafkausers + singular: kafkauser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KafkaUser is the Schema for the KafkaUsers API. Manages a user + of a Kafka cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KafkaUserSpec defines the desired state of KafkaUser + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a KafkaCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a KafkaCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + allowHosts: + description: Set of hosts, to which this permission grants + access to. + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role type to grant to the topic. + type: string + topicName: + description: The name of the topic that the permission grants + access to. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a KafkaCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a KafkaCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + allowHosts: + description: Set of hosts, to which this permission grants + access to. + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role type to grant to the topic. + type: string + topicName: + description: The name of the topic that the permission grants + access to. + type: string + type: object + type: array + required: + - passwordSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.passwordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' + status: + description: KafkaUserStatus defines the observed state of KafkaUser. + properties: + atProvider: + properties: + clusterId: + type: string + id: + type: string + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + allowHosts: + description: Set of hosts, to which this permission grants + access to. + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role type to grant to the topic. + type: string + topicName: + description: The name of the topic that the permission grants + access to. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
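Per the validations above, a KafkaUser needs both a name and a passwordSecretRef; the password is read from a pre-created Kubernetes Secret rather than stored inline. A sketch with hypothetical names follows; the role value uses the provider's ACCESS_ROLE_* convention, which should be verified against the provider docs:

  apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
  kind: KafkaUser
  metadata:
    name: app-user
  spec:
    forProvider:
      clusterIdRef:
        name: my-kafka-cluster           # hypothetical KafkaCluster object
      name: app-user
      passwordSecretRef:                 # required by the CEL rule above
        name: kafka-user-credentials     # pre-created Secret
        namespace: default
        key: password
      permission:
        - topicName: events
          role: ACCESS_ROLE_CONSUMER     # assumed role constant; verify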
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbclusters.yaml new file mode 100644 index 0000000..c9aebe1 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbclusters.yaml @@ -0,0 +1,3095 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mongodbclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: MongodbCluster + listKind: MongodbClusterList + plural: mongodbclusters + singular: mongodbcluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MongodbCluster is the Schema for the MongodbClusters API. Manages + a MongoDB cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongodbClusterSpec defines the desired state of MongodbCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterConfig: + description: Configuration of the MongoDB subcluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the MongoDB cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer. + type: boolean + type: object + type: array + backupRetainPeriodDays: + description: Retention period of automatically created backups, + in days. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC + timezone. The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + featureCompatibilityVersion: + description: Feature compatibility version of MongoDB. If + not provided, the value of version is used. Can be one of 6.0, 5.0, + 4.4 or 4.2. + type: string + mongocfg: + description: Configuration of the mongocfg service. The + structure is documented below. + items: + properties: + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + operationProfiling: + description: A set of profiling settings (see the + operationProfiling option). The structure is documented + below. + items: + properties: + mode: + description: 'Specifies which operations should + be profiled. The following profiler levels + are available: off, slow_op, all. For more + information, see the operationProfiling.mode + description in the official documentation.' + type: string + slowOpThreshold: + description: The slow operation time threshold, + in milliseconds. Operations that run for longer + than this threshold are considered slow. For + more information, see the operationProfiling.slowOpThresholdMs + description in the official documentation. + type: number + type: object + type: array + storage: + description: A set of storage settings (see the storage + option). The structure is documented below. + items: + properties: + wiredTiger: + description: The WiredTiger engine settings. + (see the storage.wiredTiger option).
These + settings available only on mongod hosts. The + structure is documented below. + items: + properties: + cacheSizeGb: + description: Defines the maximum size + of the internal cache that WiredTiger + will use for all data. For more information, + see the storage.wiredTiger.engineConfig.cacheSizeGB + description in the official documentation. + type: number + type: object + type: array + type: object + type: array + type: object + type: array + mongod: + description: Configuration of the mongod service. The structure + is documented below. + items: + properties: + auditLog: + description: A set of audit log settings (see the + auditLog option). The structure is documented below. + Available only in enterprise edition. + items: + properties: + filter: + description: Configuration of the audit log + filter in JSON format. For more information + see auditLog.filter description in the official + documentation. Available only in enterprise + edition. + type: string + runtimeConfiguration: + description: Specifies if a node allows runtime + configuration of audit filters and the auditAuthorizationSuccess + variable. For more information see auditLog.runtimeConfiguration + description in the official documentation. + Available only in enterprise edition. + type: boolean + type: object + type: array + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + compressors: + description: 'Specifies the default compressor(s) + to use for communication between this mongod + or mongos. Accepts array of compressors. Order + matters. Available compressors: snappy, zlib, + zstd, disabled. To disable network compression, + make "disabled" the only value. For more information, + see the net.Compression.Compressors description + in the official documentation.' + items: + type: string + type: array + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + operationProfiling: + description: A set of profiling settings (see the + operationProfiling option). The structure is documented + below. + items: + properties: + mode: + description: 'Specifies which operations should + be profiled. The following profiler levels + are available: off, slow_op, all. For more + information, see the operationProfiling.mode + description in the official documentation.' + type: string + slowOpSampleRate: + description: The fraction of slow operations + that should be profiled or logged. Accepts + values between 0 and 1, inclusive. For more + information, see the operationProfiling.slowOpSampleRate + description in the official documentation. + type: number + slowOpThreshold: + description: The slow operation time threshold, + in milliseconds. Operations that run for longer + than this threshold are considered slow. For + more information, see the operationProfiling.slowOpThresholdMs + description in the official documentation. + type: number + type: object + type: array + security: + description: A set of MongoDB Security settings (see + the security option). The structure is documented + below. Available only in enterprise edition. + items: + properties: + enableEncryption: + description: Enables the encryption for the + WiredTiger storage engine. Can be either true + or false. 
For more information see security.enableEncryption + description in the official documentation. + Available only in enterprise edition. + type: boolean + kmip: + description: Configuration of the third party + key management appliance via the Key Management + Interoperability Protocol (KMIP) (see Encryption + tutorial ). Requires enable_encryption to + be true. The structure is documented below. + Available only in enterprise edition. + items: + properties: + clientCertificate: + description: String containing the client + certificate used for authenticating + MongoDB to the KMIP server. For more + information see security.kmip.clientCertificateFile + description in the official documentation. + type: string + keyIdentifier: + description: Unique KMIP identifier for + an existing key within the KMIP server. + For more information see security.kmip.keyIdentifier + description in the official documentation. + type: string + port: + description: 'Port number to use to communicate + with the KMIP server. Default: 5696 + For more information see security.kmip.port + description in the official documentation.' + type: number + serverCa: + description: Path to CA File. Used for + validating secure client connection + to KMIP server. For more information + see security.kmip.serverCAFile description + in the official documentation. + type: string + serverName: + description: Hostname or IP address of + the KMIP server to connect to. For more + information see security.kmip.serverName + description in the official documentation. + type: string + type: object + type: array + type: object + type: array + setParameter: + description: A set of MongoDB Server Parameters (see + the setParameter option). The structure is documented + below. + items: + properties: + auditAuthorizationSuccess: + description: Enables the auditing of authorization + successes. Can be either true or false. For + more information, see the auditAuthorizationSuccess + description in the official documentation. + Available only in enterprise edition. + type: boolean + enableFlowControl: + description: Enables the flow control. Can be + either true or false. For more information, + see the enableFlowControl description in the + official documentation. + type: boolean + minSnapshotHistoryWindowInSeconds: + description: The minimum time window in seconds + for which the storage engine keeps the snapshot + history. For more information, see the minSnapshotHistoryWindowInSeconds + description in the official documentation. + type: number + type: object + type: array + storage: + description: A set of storage settings (see the storage + option). The structure is documented below. + items: + properties: + journal: + description: The durability journal to ensure + data files remain valid and recoverable. The + structure is documented below. + items: + properties: + commitInterval: + description: The maximum amount of time + in milliseconds that the mongod process + allows between journal operations. For + more information, see the storage.journal.commitIntervalMs + description in the official documentation. + type: number + type: object + type: array + wiredTiger: + description: The WiredTiger engine settings. + (see the storage.wiredTiger option). These + settings available only on mongod hosts. The + structure is documented below. + items: + properties: + blockCompressor: + description: 'Specifies the default compression + for collection data. You can override + this on a per-collection basis when + creating collections. 
Available compressors + are: none, snappy, zlib, zstd. This + setting is available only on mongod hosts. + For more information, see the storage.wiredTiger.collectionConfig.blockCompressor + description in the official documentation.' + type: string + cacheSizeGb: + description: Defines the maximum size + of the internal cache that WiredTiger + will use for all data. For more information, + see the storage.wiredTiger.engineConfig.cacheSizeGB + description in the official documentation. + type: number + prefixCompression: + description: Enables or disables prefix + compression for index data. Can be either + true or false. For more information, + see the storage.wiredTiger.indexConfig.prefixCompression + description in the official documentation. + type: boolean + type: object + type: array + type: object + type: array + type: object + type: array + mongos: + description: Configuration of the mongos service. The structure + is documented below. + items: + properties: + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + compressors: + description: 'Specifies the default compressor(s) + to use for communication between this mongod + or mongos. Accepts array of compressors. Order + matters. Available compressors: snappy, zlib, + zstd, disabled. To disable network compression, + make "disabled" the only value. For more information, + see the net.Compression.Compressors description + in the official documentation.' + items: + type: string + type: array + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + type: object + type: array + performanceDiagnostics: + description: Performance diagnostics for the MongoDB cluster. + The structure is documented below. + items: + properties: + enabled: + description: Enable or disable performance diagnostics. + type: boolean + type: object + type: array + version: + description: Version of the MongoDB server software. Can + be one of 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, + 6.0 or 6.0-enterprise. + type: string + type: object + type: array + clusterId: + description: The ID of the cluster. + type: string + database: + description: A database of the MongoDB cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the MongoDB cluster. + type: string + diskSizeAutoscalingMongocfg: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongod: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent).
+ type: number + type: object + type: array + diskSizeAutoscalingMongoinfra: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongos: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + environment: + description: Deployment environment of the MongoDB cluster. Can + be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the MongoDB cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Should this host have a public IP assigned. + Can be either true or false.
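To make the host schema described here concrete, the following is a hedged sketch of a spec.forProvider.host list for a three-node replica set, including a hidden, delayed member configured through the hostParameters fields documented below; zone IDs and Subnet object names are hypothetical:

    host:
      - zoneId: ru-central1-a
        subnetIdRef:
          name: subnet-a               # hypothetical Subnet object
      - zoneId: ru-central1-b
        subnetIdRef:
          name: subnet-b
      - zoneId: ru-central1-d
        subnetIdRef:
          name: subnet-d
        assignPublicIp: false
        hostParameters:
          - hidden: true               # hide this member from client reads
            priority: 0                # a hidden member must not become primary
            secondaryDelaySecs: 3600   # keep this member one hour behind the primary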
+ type: boolean + hostParameters: + description: The parameters of mongod host in replicaset. + items: + properties: + hidden: + description: Should this host be hidden in replicaset. + Can be either true of false. For more information + see the official documentation + type: boolean + priority: + description: A floating point number that indicates + the relative likelihood of a replica set member + to become the primary. For more information see + the official documentation + type: number + secondaryDelaySecs: + description: The number of seconds "behind" the primary + that this replica set member should "lag". For more + information see the official documentation + type: number + tags: + additionalProperties: + type: string + description: A set of key/value pairs to assign for + the replica set member. For more information see + the official documentation + type: object + x-kubernetes-map-type: granular + type: object + type: array + role: + description: The role of the cluster (either PRIMARY or + SECONDARY). + type: string + shardName: + description: The name of the shard to which the host belongs. + Only for sharded cluster. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: type of mongo daemon which runs on this host + (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod. + type: string + zoneId: + description: The availability zone where the MongoDB host + will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the MongoDB + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance window settings of the MongoDB cluster. + The structure is documented below. + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the MongoDB cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the MongoDB cluster belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ resources:
+ description: (DEPRECATED, use resources_* instead) Resources allocated
+ to hosts of the MongoDB cluster. The structure is documented
+ below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a MongoDB
+ host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of MongoDB hosts. For more
+ information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ resourcesMongocfg:
+ description: Resources allocated to mongocfg hosts of the MongoDB
+ cluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a MongoDB
+ host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of MongoDB hosts. For more
+ information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ resourcesMongod:
+ description: Resources allocated to mongod hosts of the MongoDB
+ cluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a MongoDB
+ host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of MongoDB hosts. For more
+ information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ resourcesMongoinfra:
+ description: Resources allocated to mongoinfra hosts of the MongoDB
+ cluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a MongoDB
+ host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of MongoDB hosts. For more
+ information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ resourcesMongos:
+ description: Resources allocated to mongos hosts of the MongoDB
+ cluster. The structure is documented below.
+ items:
+ properties:
+ diskSize:
+ description: Volume of the storage available to a MongoDB
+ host, in gigabytes.
+ type: number
+ diskTypeId:
+ description: Type of the storage of MongoDB hosts. For more
+ information see the official documentation.
+ type: string
+ resourcePresetId:
+ type: string
+ type: object
+ type: array
+ restore:
+ description: The cluster will be created from the specified backup.
+ The structure is documented below.
+ items:
+ properties:
+ backupId:
+ description: Backup ID. The cluster will be created from
+ the specified backup. For how to get a list of MongoDB
+ backups, see the official documentation.
+ type: string
+ time:
+ description: 'Timestamp of the moment to which the MongoDB
+ cluster should be restored. (Format: "2006-01-02T15:04:05"
+ - UTC). When not set, the current time is used.'
+ type: string
+ type: object
+ type: array
+ securityGroupIds:
+ description: A set of IDs of security groups assigned to hosts
+ of the cluster.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ securityGroupIdsRefs:
+ description: References to SecurityGroup in vpc to populate securityGroupIds.
+ items:
+ description: A Reference to a named object.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
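+ # Editor's sketch (backup ID and preset are hypothetical placeholders):
+ # pairing the per-daemon resources blocks with a point-in-time restore, as
+ # the resourcesMongod and restore schemas above describe.
+ #   resourcesMongod:
+ #     - resourcePresetId: s2.micro
+ #       diskTypeId: network-ssd
+ #       diskSize: 40
+ #   restore:
+ #     - backupId: <mongodb-backup-id>
+ #       time: "2006-01-02T15:04:05"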
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: A user of the MongoDB cluster. The structure is documented + below. + items: + properties: + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: The roles of the user in this database. + For more information see the official documentation. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are
+ required on creation, but we do not desire to update them after creation,
+ for example because an external controller is managing them, like an
+ autoscaler.
+ properties:
+ clusterConfig:
+ description: Configuration of the MongoDB subcluster. The structure
+ is documented below.
+ items:
+ properties:
+ access:
+ description: Access policy to the MongoDB cluster. The structure
+ is documented below.
+ items:
+ properties:
+ dataLens:
+ description: Allow access for Yandex DataLens.
+ type: boolean
+ dataTransfer:
+ description: Allow access for DataTransfer.
+ type: boolean
+ type: object
+ type: array
+ backupRetainPeriodDays:
+ description: Retention period of automatically created backups,
+ in days.
+ type: number
+ backupWindowStart:
+ description: Time to start the daily backup, in the UTC
+ timezone. The structure is documented below.
+ items:
+ properties:
+ hours:
+ description: The hour at which backup will be started.
+ type: number
+ minutes:
+ description: The minute at which backup will be started.
+ type: number
+ type: object
+ type: array
+ featureCompatibilityVersion:
+ description: Feature compatibility version of MongoDB. If
+ not provided, it defaults to the cluster's MongoDB version. Can be 6.0, 5.0,
+ 4.4 or 4.2.
+ type: string
+ mongocfg:
+ description: Configuration of the mongocfg service. The
+ structure is documented below.
+ items:
+ properties:
+ net:
+ description: A set of network settings (see the net
+ option). The structure is documented below.
+ items:
+ properties:
+ maxIncomingConnections:
+ description: The maximum number of simultaneous
+ connections that the host will accept. For more
+ information, see the net.maxIncomingConnections
+ description in the official documentation.
+ type: number
+ type: object
+ type: array
+ operationProfiling:
+ description: A set of profiling settings (see the
+ operationProfiling option). The structure is documented
+ below.
+ items:
+ properties:
+ mode:
+ description: 'Specifies which operations should
+ be profiled. The following profiler levels
+ are available: off, slow_op, all. For more
+ information, see the operationProfiling.mode
+ description in the official documentation.'
+ type: string
+ slowOpThreshold:
+ description: The slow operation time threshold,
+ in milliseconds. Operations that run for longer
+ than this threshold are considered slow. For
+ more information, see the operationProfiling.slowOpThresholdMs
+ description in the official documentation.
+ type: number
+ type: object
+ type: array
+ storage:
+ description: A set of storage settings (see the storage
+ option). The structure is documented below.
+ items:
+ properties:
+ wiredTiger:
+ description: The WiredTiger engine settings
+ (see the storage.wiredTiger option). These
+ settings are available only on mongod hosts. The
+ structure is documented below.
+ items:
+ properties:
+ cacheSizeGb:
+ description: Defines the maximum size
+ of the internal cache that WiredTiger
+ will use for all data. For more information,
+ see the storage.wiredTiger.engineConfig.cacheSizeGB
+ description in the official documentation.
+ type: number
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ mongod:
+ description: Configuration of the mongod service. The structure
+ is documented below.
+ items:
+ properties:
+ auditLog:
+ description: A set of audit log settings (see the
+ auditLog option). The structure is documented below.
+ Available only in enterprise edition.
+ items: + properties: + filter: + description: Configuration of the audit log + filter in JSON format. For more information + see auditLog.filter description in the official + documentation. Available only in enterprise + edition. + type: string + runtimeConfiguration: + description: Specifies if a node allows runtime + configuration of audit filters and the auditAuthorizationSuccess + variable. For more information see auditLog.runtimeConfiguration + description in the official documentation. + Available only in enterprise edition. + type: boolean + type: object + type: array + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + compressors: + description: 'Specifies the default compressor(s) + to use for communication between this mongod + or mongos. Accepts array of compressors. Order + matters. Available compressors: snappy, zlib, + zstd, disabled. To disable network compression, + make "disabled" the only value. For more information, + see the net.Compression.Compressors description + in the official documentation.' + items: + type: string + type: array + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + operationProfiling: + description: A set of profiling settings (see the + operationProfiling option). The structure is documented + below. + items: + properties: + mode: + description: 'Specifies which operations should + be profiled. The following profiler levels + are available: off, slow_op, all. For more + information, see the operationProfiling.mode + description in the official documentation.' + type: string + slowOpSampleRate: + description: The fraction of slow operations + that should be profiled or logged. Accepts + values between 0 and 1, inclusive. For more + information, see the operationProfiling.slowOpSampleRate + description in the official documentation. + type: number + slowOpThreshold: + description: The slow operation time threshold, + in milliseconds. Operations that run for longer + than this threshold are considered slow. For + more information, see the operationProfiling.slowOpThresholdMs + description in the official documentation. + type: number + type: object + type: array + security: + description: A set of MongoDB Security settings (see + the security option). The structure is documented + below. Available only in enterprise edition. + items: + properties: + enableEncryption: + description: Enables the encryption for the + WiredTiger storage engine. Can be either true + or false. For more information see security.enableEncryption + description in the official documentation. + Available only in enterprise edition. + type: boolean + kmip: + description: Configuration of the third party + key management appliance via the Key Management + Interoperability Protocol (KMIP) (see Encryption + tutorial ). Requires enable_encryption to + be true. The structure is documented below. + Available only in enterprise edition. + items: + properties: + clientCertificate: + description: String containing the client + certificate used for authenticating + MongoDB to the KMIP server. For more + information see security.kmip.clientCertificateFile + description in the official documentation. 
+ type: string + keyIdentifier: + description: Unique KMIP identifier for + an existing key within the KMIP server. + For more information see security.kmip.keyIdentifier + description in the official documentation. + type: string + port: + description: 'Port number to use to communicate + with the KMIP server. Default: 5696 + For more information see security.kmip.port + description in the official documentation.' + type: number + serverCa: + description: Path to CA File. Used for + validating secure client connection + to KMIP server. For more information + see security.kmip.serverCAFile description + in the official documentation. + type: string + serverName: + description: Hostname or IP address of + the KMIP server to connect to. For more + information see security.kmip.serverName + description in the official documentation. + type: string + type: object + type: array + type: object + type: array + setParameter: + description: A set of MongoDB Server Parameters (see + the setParameter option). The structure is documented + below. + items: + properties: + auditAuthorizationSuccess: + description: Enables the auditing of authorization + successes. Can be either true or false. For + more information, see the auditAuthorizationSuccess + description in the official documentation. + Available only in enterprise edition. + type: boolean + enableFlowControl: + description: Enables the flow control. Can be + either true or false. For more information, + see the enableFlowControl description in the + official documentation. + type: boolean + minSnapshotHistoryWindowInSeconds: + description: The minimum time window in seconds + for which the storage engine keeps the snapshot + history. For more information, see the minSnapshotHistoryWindowInSeconds + description in the official documentation. + type: number + type: object + type: array + storage: + description: A set of storage settings (see the storage + option). The structure is documented below. + items: + properties: + journal: + description: The durability journal to ensure + data files remain valid and recoverable. The + structure is documented below. + items: + properties: + commitInterval: + description: The maximum amount of time + in milliseconds that the mongod process + allows between journal operations. For + more information, see the storage.journal.commitIntervalMs + description in the official documentation. + type: number + type: object + type: array + wiredTiger: + description: The WiredTiger engine settings. + (see the storage.wiredTiger option). These + settings available only on mongod hosts. The + structure is documented below. + items: + properties: + blockCompressor: + description: 'Specifies the default compression + for collection data. You can override + this on a per-collection basis when + creating collections. Available compressors + are: none, snappy, zlib, zstd. This + setting available only on mongod hosts. + For more information, see the storage.wiredTiger.collectionConfig.blockCompressor + description in the official documentation.' + type: string + cacheSizeGb: + description: Defines the maximum size + of the internal cache that WiredTiger + will use for all data. For more information, + see the storage.wiredTiger.engineConfig.cacheSizeGB + description in the official documentation. + type: number + prefixCompression: + description: Enables or disables prefix + compression for index data. Сan be either + true or false. 
For more information, + see the storage.wiredTiger.indexConfig.prefixCompression + description in the official documentation. + type: boolean + type: object + type: array + type: object + type: array + type: object + type: array + mongos: + description: Configuration of the mongos service. The structure + is documented below. + items: + properties: + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + compressors: + description: 'Specifies the default compressor(s) + to use for communication between this mongod + or mongos. Accepts array of compressors. Order + matters. Available compressors: snappy, zlib, + zstd, disabled. To disable network compression, + make "disabled" the only value. For more information, + see the net.Compression.Compressors description + in the official documentation.' + items: + type: string + type: array + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + type: object + type: array + performanceDiagnostics: + description: Performance diagnostics to the MongoDB cluster. + The structure is documented below. + items: + properties: + enabled: + description: Enable or disable performance diagnostics. + type: boolean + type: object + type: array + version: + description: Version of the MongoDB server software. Can + be either 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, + 6.0 and 6.0-enterprise. + type: string + type: object + type: array + clusterId: + description: The ID of the cluster. + type: string + database: + description: A database of the MongoDB cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the MongoDB cluster. + type: string + diskSizeAutoscalingMongocfg: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongod: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongoinfra: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongos: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). 
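+ # Editor's sketch (values illustrative): the ordered compressors list for
+ # mongos networking described above; per the schema text, listing
+ # "disabled" alone would turn network compression off.
+ #   mongos:
+ #     - net:
+ #         - compressors:
+ #             - zstd
+ #             - snappy
+ #           maxIncomingConnections: 1024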
+ type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + environment: + description: Deployment environment of the MongoDB cluster. Can + be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the MongoDB cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Should this host have assigned public IP assigned. + Can be either true or false. + type: boolean + hostParameters: + description: The parameters of mongod host in replicaset. + items: + properties: + hidden: + description: Should this host be hidden in replicaset. + Can be either true of false. For more information + see the official documentation + type: boolean + priority: + description: A floating point number that indicates + the relative likelihood of a replica set member + to become the primary. For more information see + the official documentation + type: number + secondaryDelaySecs: + description: The number of seconds "behind" the primary + that this replica set member should "lag". 
For more + information see the official documentation + type: number + tags: + additionalProperties: + type: string + description: A set of key/value pairs to assign for + the replica set member. For more information see + the official documentation + type: object + x-kubernetes-map-type: granular + type: object + type: array + role: + description: The role of the cluster (either PRIMARY or + SECONDARY). + type: string + shardName: + description: The name of the shard to which the host belongs. + Only for sharded cluster. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: type of mongo daemon which runs on this host + (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod. + type: string + zoneId: + description: The availability zone where the MongoDB host + will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the MongoDB + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance window settings of the MongoDB cluster. + The structure is documented below. 
+ items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the MongoDB cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the MongoDB cluster belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resources: + description: (DEPRECATED, use resources_* instead) Resources allocated + to hosts of the MongoDB cluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongocfg: + description: Resources allocated to mongocfg hosts of the MongoDB + cluster. The structure is documented below. 
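+ # Editor's sketch (day/hour illustrative): a WEEKLY maintenance window as
+ # described above; an ANYTIME window would omit day and hour.
+ #   maintenanceWindow:
+ #     - type: WEEKLY
+ #       day: SAT
+ #       hour: 3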
+ items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongod: + description: Resources allocated to mongod hosts of the MongoDB + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongoinfra: + description: Resources allocated to mongoinfra hosts of the MongoDB + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongos: + description: Resources allocated to mongos hosts of the MongoDB + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. How to get a list of PostgreSQL + backups + type: string + time: + description: 'Timestamp of the moment to which the MongoDB + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: A user of the MongoDB cluster. The structure is documented + below. + items: + properties: + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: The roles of the user in this database. + For more information see the official documentation. + items: + type: string + type: array + type: object + type: array + required: + - passwordSecretRef + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
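+ # Editor's sketch (secret name hypothetical): typical wiring of the
+ # cross-cutting fields documented above and just below.
+ #   managementPolicies: ["*"]
+ #   providerConfigRef:
+ #     name: default
+ #   writeConnectionSecretToRef:
+ #     name: mongodb-cluster-conn
+ #     namespace: crossplane-system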
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterConfig) + || (has(self.initProvider) && has(self.initProvider.clusterConfig))' + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.host is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.host) + || (has(self.initProvider) && has(self.initProvider.host))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: MongodbClusterStatus defines the observed state of MongodbCluster. + properties: + atProvider: + properties: + clusterConfig: + description: Configuration of the MongoDB subcluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the MongoDB cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + type: object + type: array + backupRetainPeriodDays: + description: Retain period of automatically created backup + in days. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC + timezone. The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + featureCompatibilityVersion: + description: Feature compatibility version of MongoDB. If + not provided version is taken. Can be either 6.0, 5.0, + 4.4 and 4.2. + type: string + mongocfg: + description: Configuration of the mongocfg service. The + structure is documented below. + items: + properties: + net: + description: A set of network settings (see the net + option). The structure is documented below. 
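+ # Editor's note: the CEL rules above require clusterConfig, environment,
+ # host, and name only when Crossplane may Create or Update the resource;
+ # an observe-only import such as this hypothetical sketch passes without
+ # them.
+ #   managementPolicies: ["Observe"]
+ #   forProvider: {}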
+ items: + properties: + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + operationProfiling: + description: A set of profiling settings (see the + operationProfiling option). The structure is documented + below. + items: + properties: + mode: + description: 'Specifies which operations should + be profiled. The following profiler levels + are available: off, slow_op, all. For more + information, see the operationProfiling.mode + description in the official documentation.' + type: string + slowOpThreshold: + description: The slow operation time threshold, + in milliseconds. Operations that run for longer + than this threshold are considered slow. For + more information, see the operationProfiling.slowOpThresholdMs + description in the official documentation. + type: number + type: object + type: array + storage: + description: A set of storage settings (see the storage + option). The structure is documented below. + items: + properties: + wiredTiger: + description: The WiredTiger engine settings. + (see the storage.wiredTiger option). These + settings available only on mongod hosts. The + structure is documented below. + items: + properties: + cacheSizeGb: + description: Defines the maximum size + of the internal cache that WiredTiger + will use for all data. For more information, + see the storage.wiredTiger.engineConfig.cacheSizeGB + description in the official documentation. + type: number + type: object + type: array + type: object + type: array + type: object + type: array + mongod: + description: Configuration of the mongod service. The structure + is documented below. + items: + properties: + auditLog: + description: A set of audit log settings (see the + auditLog option). The structure is documented below. + Available only in enterprise edition. + items: + properties: + filter: + description: Configuration of the audit log + filter in JSON format. For more information + see auditLog.filter description in the official + documentation. Available only in enterprise + edition. + type: string + runtimeConfiguration: + description: Specifies if a node allows runtime + configuration of audit filters and the auditAuthorizationSuccess + variable. For more information see auditLog.runtimeConfiguration + description in the official documentation. + Available only in enterprise edition. + type: boolean + type: object + type: array + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + compressors: + description: 'Specifies the default compressor(s) + to use for communication between this mongod + or mongos. Accepts array of compressors. Order + matters. Available compressors: snappy, zlib, + zstd, disabled. To disable network compression, + make "disabled" the only value. For more information, + see the net.Compression.Compressors description + in the official documentation.' + items: + type: string + type: array + maxIncomingConnections: + description: The maximum number of simultaneous + connections that host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + operationProfiling: + description: A set of profiling settings (see the + operationProfiling option). The structure is documented + below. 
+ items: + properties: + mode: + description: 'Specifies which operations should + be profiled. The following profiler levels + are available: off, slow_op, all. For more + information, see the operationProfiling.mode + description in the official documentation.' + type: string + slowOpSampleRate: + description: The fraction of slow operations + that should be profiled or logged. Accepts + values between 0 and 1, inclusive. For more + information, see the operationProfiling.slowOpSampleRate + description in the official documentation. + type: number + slowOpThreshold: + description: The slow operation time threshold, + in milliseconds. Operations that run for longer + than this threshold are considered slow. For + more information, see the operationProfiling.slowOpThresholdMs + description in the official documentation. + type: number + type: object + type: array + security: + description: A set of MongoDB Security settings (see + the security option). The structure is documented + below. Available only in enterprise edition. + items: + properties: + enableEncryption: + description: Enables the encryption for the + WiredTiger storage engine. Can be either true + or false. For more information see security.enableEncryption + description in the official documentation. + Available only in enterprise edition. + type: boolean + kmip: + description: Configuration of the third party + key management appliance via the Key Management + Interoperability Protocol (KMIP) (see Encryption + tutorial ). Requires enable_encryption to + be true. The structure is documented below. + Available only in enterprise edition. + items: + properties: + clientCertificate: + description: String containing the client + certificate used for authenticating + MongoDB to the KMIP server. For more + information see security.kmip.clientCertificateFile + description in the official documentation. + type: string + keyIdentifier: + description: Unique KMIP identifier for + an existing key within the KMIP server. + For more information see security.kmip.keyIdentifier + description in the official documentation. + type: string + port: + description: 'Port number to use to communicate + with the KMIP server. Default: 5696 + For more information see security.kmip.port + description in the official documentation.' + type: number + serverCa: + description: Path to CA File. Used for + validating secure client connection + to KMIP server. For more information + see security.kmip.serverCAFile description + in the official documentation. + type: string + serverName: + description: Hostname or IP address of + the KMIP server to connect to. For more + information see security.kmip.serverName + description in the official documentation. + type: string + type: object + type: array + type: object + type: array + setParameter: + description: A set of MongoDB Server Parameters (see + the setParameter option). The structure is documented + below. + items: + properties: + auditAuthorizationSuccess: + description: Enables the auditing of authorization + successes. Can be either true or false. For + more information, see the auditAuthorizationSuccess + description in the official documentation. + Available only in enterprise edition. + type: boolean + enableFlowControl: + description: Enables the flow control. Can be + either true or false. For more information, + see the enableFlowControl description in the + official documentation. 
+ type: boolean + minSnapshotHistoryWindowInSeconds: + description: The minimum time window in seconds + for which the storage engine keeps the snapshot + history. For more information, see the minSnapshotHistoryWindowInSeconds + description in the official documentation. + type: number + type: object + type: array + storage: + description: A set of storage settings (see the storage + option). The structure is documented below. + items: + properties: + journal: + description: The durability journal to ensure + data files remain valid and recoverable. The + structure is documented below. + items: + properties: + commitInterval: + description: The maximum amount of time + in milliseconds that the mongod process + allows between journal operations. For + more information, see the storage.journal.commitIntervalMs + description in the official documentation. + type: number + type: object + type: array + wiredTiger: + description: The WiredTiger engine settings + (see the storage.wiredTiger option). These + settings are available only on mongod hosts. The + structure is documented below. + items: + properties: + blockCompressor: + description: 'Specifies the default compression + for collection data. You can override + this on a per-collection basis when + creating collections. Available compressors + are: none, snappy, zlib, zstd. This + setting is available only on mongod hosts. + For more information, see the storage.wiredTiger.collectionConfig.blockCompressor + description in the official documentation.' + type: string + cacheSizeGb: + description: Defines the maximum size + of the internal cache that WiredTiger + will use for all data. For more information, + see the storage.wiredTiger.engineConfig.cacheSizeGB + description in the official documentation. + type: number + prefixCompression: + description: Enables or disables prefix + compression for index data. Can be either + true or false. For more information, + see the storage.wiredTiger.indexConfig.prefixCompression + description in the official documentation. + type: boolean + type: object + type: array + type: object + type: array + type: object + type: array + mongos: + description: Configuration of the mongos service. The structure + is documented below. + items: + properties: + net: + description: A set of network settings (see the net + option). The structure is documented below. + items: + properties: + compressors: + description: 'Specifies the default compressor(s) + to use for communication between this mongod + or mongos. Accepts an array of compressors. Order + matters. Available compressors: snappy, zlib, + zstd, disabled. To disable network compression, + make "disabled" the only value. For more information, + see the net.Compression.Compressors description + in the official documentation.' + items: + type: string + type: array + maxIncomingConnections: + description: The maximum number of simultaneous + connections that the host will accept. For more + information, see the net.maxIncomingConnections + description in the official documentation. + type: number + type: object + type: array + type: object + type: array + performanceDiagnostics: + description: Performance diagnostics settings of the MongoDB + cluster. The structure is documented below. + items: + properties: + enabled: + description: Enable or disable performance diagnostics. + type: boolean + type: object + type: array + version: + description: Version of the MongoDB server software. Can + be either 4.2, 4.4, 4.4-enterprise, 5.0, 5.0-enterprise, + 6.0 or 6.0-enterprise.
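+# Example (illustrative sketch, not part of the generated schema): the clusterConfig
+# block above describes observed state; the same structure is set under
+# spec.forProvider.clusterConfig. All values below are hypothetical:
+#   clusterConfig:
+#     - version: "6.0"
+#       mongod:
+#         - net:
+#             - maxIncomingConnections: 1024
+#           operationProfiling:
+#             - mode: slow_op
+#               slowOpThreshold: 300
+#           storage:
+#             - wiredTiger:
+#                 - blockCompressor: snappy
+#                   cacheSizeGb: 2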
+ type: string + type: object + type: array + clusterId: + description: The ID of the cluster. + type: string + createdAt: + description: Creation timestamp of the cluster. + type: string + database: + description: A database of the MongoDB cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the MongoDB cluster. + type: string + diskSizeAutoscalingMongocfg: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongod: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongoinfra: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + diskSizeAutoscalingMongos: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + environment: + description: Deployment environment of the MongoDB cluster. Can + be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + health: + description: Aggregated health of the cluster. Can be either ALIVE, + DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health + field of the JSON representation in the official documentation. + type: string + host: + description: A host of the MongoDB cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Should this host have a public IP assigned. + Can be either true or false. + type: boolean + health: + description: (Computed) The health of the host. + type: string + hostParameters: + description: The parameters of the mongod host in the replica + set. + items: + properties: + hidden: + description: Should this host be hidden in the replica set. + Can be either true or false. For more information + see the official documentation. + type: boolean + priority: + description: A floating point number that indicates + the relative likelihood of a replica set member + to become the primary.
For more information see + the official documentation. + type: number + secondaryDelaySecs: + description: The number of seconds "behind" the primary + that this replica set member should "lag". For more + information see the official documentation. + type: number + tags: + additionalProperties: + type: string + description: A set of key/value pairs to assign for + the replica set member. For more information see + the official documentation. + type: object + x-kubernetes-map-type: granular + type: object + type: array + name: + description: (Computed) The fully qualified domain name + of the host. Computed on server side. + type: string + role: + description: The role of the host in the cluster (either PRIMARY or + SECONDARY). + type: string + shardName: + description: The name of the shard to which the host belongs. + Only for sharded clusters. + type: string + subnetId: + description: The ID of the subnet to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + type: + description: Type of the mongo daemon that runs on this host + (mongod, mongos, mongocfg, mongoinfra). Defaults to mongod. + type: string + zoneId: + description: The availability zone where the MongoDB host + will be created. For more information see the official + documentation. + type: string + type: object + type: array + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the MongoDB + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance window settings of the MongoDB cluster. + The structure is documented below. + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of the window need to be specified + for the weekly window. + type: string + type: object + type: array + name: + description: Name of the MongoDB cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network to which the MongoDB cluster belongs. + type: string + resources: + description: (DEPRECATED, use resources_* instead) Resources allocated + to hosts of the MongoDB cluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongocfg: + description: Resources allocated to mongocfg hosts of the MongoDB + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongod: + description: Resources allocated to mongod hosts of the MongoDB + cluster. The structure is documented below.
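+# Example (illustrative sketch, not part of the generated schema): a host entry
+# and matching mongod resources as they would be set in spec.forProvider; the
+# zone, subnet ID and resource preset below are hypothetical:
+#   host:
+#     - zoneId: ru-central1-a
+#       subnetId: example-subnet-id
+#       type: mongod
+#       hostParameters:
+#         - hidden: false
+#           priority: 1
+#   resourcesMongod:
+#     - resourcePresetId: s2.micro
+#       diskTypeId: network-ssd
+#       diskSize: 16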
+ items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongoinfra: + description: Resources allocated to mongoinfra hosts of the MongoDB + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + resourcesMongos: + description: Resources allocated to mongos hosts of the MongoDB + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MongoDB + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MongoDB hosts. For more + information see the official documentation. + type: string + resourcePresetId: + type: string + type: object + type: array + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. How to get a list of MongoDB + backups. + type: string + time: + description: 'Timestamp of the moment to which the MongoDB + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sharded: + description: MongoDB sharded cluster mode enabled/disabled. + type: boolean + status: + description: Status of the cluster. Can be either CREATING, STARTING, + RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. + For more information see the status field of the JSON representation + in the official documentation. + type: string + user: + description: A user of the MongoDB cluster. The structure is documented + below. + items: + properties: + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: The roles of the user in this database. + For more information see the official documentation. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml new file mode 100644 index 0000000..5167fa9 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml @@ -0,0 +1,494 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mongodbdatabases.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: MongodbDatabase + listKind: MongodbDatabaseList + plural: mongodbdatabases + singular: mongodbdatabase + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MongodbDatabase is the Schema for the MongodbDatabases API. Manages + a MongoDB database within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongodbDatabaseSpec defines the desired state of MongodbDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a MongodbCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MongodbCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the database. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a MongodbCluster to populate clusterId. 
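+# Example (illustrative sketch, not part of the generated schema): a minimal
+# MongodbDatabase manifest for this CRD; the database and cluster names are
+# hypothetical:
+#   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+#   kind: MongodbDatabase
+#   metadata:
+#     name: app-db
+#   spec:
+#     forProvider:
+#       name: app-db
+#       clusterIdRef:
+#         name: my-mongodb-cluster
+#     providerConfigRef:
+#       name: default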
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MongodbCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the database. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: MongodbDatabaseStatus defines the observed state of MongodbDatabase. + properties: + atProvider: + properties: + clusterId: + type: string + id: + type: string + name: + description: The name of the database. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml new file mode 100644 index 0000000..3372a45 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml @@ -0,0 +1,587 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mongodbusers.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: MongodbUser + listKind: MongodbUserList + plural: mongodbusers + singular: mongodbuser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MongodbUser is the Schema for the MongodbUsers API. Manages a + MongoDB user within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongodbUserSpec defines the desired state of MongodbUser + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a MongodbCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MongodbCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: List of strings. The roles of the user in this + database. For more information see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a MongodbCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
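+# Example (illustrative sketch, not part of the generated schema): a minimal
+# MongodbUser manifest for this CRD; the user, cluster, secret and role values
+# below are hypothetical:
+#   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+#   kind: MongodbUser
+#   metadata:
+#     name: app-user
+#   spec:
+#     forProvider:
+#       name: app-user
+#       clusterIdRef:
+#         name: my-mongodb-cluster
+#       passwordSecretRef:
+#         name: mongodb-user-password
+#         namespace: crossplane-system
+#         key: password
+#       permission:
+#         - databaseName: app-db
+#           roles:
+#             - readWrite
+#     providerConfigRef:
+#       name: default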
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MongodbCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: List of strings. The roles of the user in this + database. For more information see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + required: + - passwordSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.passwordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' + status: + description: MongodbUserStatus defines the observed state of MongodbUser. + properties: + atProvider: + properties: + clusterId: + type: string + id: + type: string + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: List of strings. The roles of the user in this + database. For more information see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
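+# Example (illustrative, not part of the generated schema): on a healthy resource
+# these conditions are typically observed as follows (timestamp hypothetical):
+#   conditions:
+#     - type: Ready
+#       status: "True"
+#       reason: Available
+#       lastTransitionTime: "2024-01-01T00:00:00Z"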
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqlclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqlclusters.yaml new file mode 100644 index 0000000..20d0867 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqlclusters.yaml @@ -0,0 +1,1811 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mysqlclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: MySQLCluster + listKind: MySQLClusterList + plural: mysqlclusters + singular: mysqlcluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MySQLCluster is the Schema for the MySQLClusters API. Manages + a MySQL cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MySQLClusterSpec defines the desired state of MySQLCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + access: + description: Access policy to the MySQL cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + webSql: + description: Allows access for SQL queries in the management + console. + type: boolean + type: object + type: array + allowRegenerationHost: + description: A host of the MySQL cluster. The structure is documented + below. + type: boolean + backupRetainPeriodDays: + description: The period in days during which backups are stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC. The structure + is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + database: + description: (Deprecated) To manage databases, please switch to + using a separate resource type yandex_mdb_mysql_databases. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the MySQL cluster. + type: string + environment: + description: Deployment environment of the MySQL cluster. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the MySQL cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address. It can be changed on the fly only when name is + set. + type: boolean + backupPriority: + description: Host backup priority. Value is between 0 and + 100, default is 0. + type: number + name: + description: Host state name. It should be set for all hosts + or unset for all hosts. This field can be used by another + host to select which host will be its replication source. + Please refer to the replication_source_name parameter. + type: string + priority: + description: Host master promotion priority. Value is between + 0 and 100, default is 0. + type: number + replicationSourceName: + description: Host replication source name points to the name + of the host from which this host should replicate. When not + set, the host is in the HA group. It works only when name is set. + type: string + subnetId: + description: The ID of the subnet to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved.
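+# Example (illustrative sketch, not part of the generated schema): a host entry
+# from the block above that resolves its subnet by reference; the zone and
+# subnet names are hypothetical:
+#   host:
+#     - zone: ru-central1-a
+#       assignPublicIp: false
+#       backupPriority: 5
+#       subnetIdRef:
+#         name: my-subnet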
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the MySQL host + will be created. + type: string + type: object + type: array + hostGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the MySQL + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the MySQL cluster. The structure + is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 0 and 23. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + mysqlConfig: + additionalProperties: + type: string + description: MySQL cluster config. Detail info in "MySQL config" + section (documented below). + type: object + x-kubernetes-map-type: granular + name: + description: Name of the MySQL cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the MySQL cluster uses. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + performanceDiagnostics: + description: Cluster performance diagnostics settings. The structure + is documented below. YC Documentation + items: + properties: + enabled: + description: Enable performance diagnostics + type: boolean + sessionsSamplingInterval: + description: Interval (in seconds) for my_stat_activity + sampling Acceptable values are 1 to 86400, inclusive. + type: number + statementsSamplingInterval: + description: Interval (in seconds) for my_stat_statements + sampling Acceptable values are 1 to 86400, inclusive. + type: number + type: object + type: array + resources: + description: Resources allocated to hosts of the MySQL cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MySQL + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MySQL hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. How to get a list of MySQL backups. + type: string + time: + description: 'Timestamp of the moment to which the MySQL + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
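+ # --- Editor's sketch (not generated; names, zones, and preset values below are
+ # hypothetical): a minimal MySQLCluster manifest that supplies the parameters
+ # this CRD's validation rules mark as required (environment, host, name,
+ # resources, version; see the x-kubernetes-validations further down) and
+ # resolves network and subnet IDs via the Ref fields defined above.
+ #
+ #   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: MySQLCluster
+ #   metadata:
+ #     name: example-mysql
+ #   spec:
+ #     forProvider:
+ #       name: example-mysql
+ #       environment: PRODUCTION    # assumed value; the schema only says string
+ #       version: "8.0"
+ #       networkIdRef:
+ #         name: example-network    # resolves networkId from a vpc Network object
+ #       resources:
+ #         - resourcePresetId: s2.micro    # hypothetical preset
+ #           diskTypeId: network-ssd
+ #           diskSize: 20
+ #       host:
+ #         - zone: ru-central1-a
+ #           subnetIdRef:
+ #             name: example-subnet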
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_mysql_user. + items: + properties: + authenticationPlugin: + description: 'Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, + CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 + MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)' + type: string + connectionLimits: + description: User's connection limits. The structure is + documented below. If the attribute is not specified there + will be no changes. + items: + properties: + maxConnectionsPerHour: + description: Max connections per hour. + type: number + maxQuestionsPerHour: + description: Max questions per hour. + type: number + maxUpdatesPerHour: + description: Max updates per hour. + type: number + maxUserConnections: + description: Max user connections. + type: number + type: object + type: array + globalPermissions: + description: |- + List user's global permissions + Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: 'List user''s roles in the database. + Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, + CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE.' + items: + type: string + type: array + type: object + type: array + type: object + type: array + version: + description: 'Version of the MySQL cluster. (allowed versions + are: 5.7, 8.0)' + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + access: + description: Access policy to the MySQL cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + webSql: + description: Allows access for SQL queries in the management + console. + type: boolean + type: object + type: array + allowRegenerationHost: + description: A host of the MySQL cluster. The structure is documented + below. + type: boolean + backupRetainPeriodDays: + description: The period in days during which backups are stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC. The structure + is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + database: + description: (Deprecated) To manage databases, please switch to + using a separate resource type yandex_mdb_mysql_databases. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the MySQL cluster. + type: string + environment: + description: Deployment environment of the MySQL cluster. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
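+ # --- Editor's sketch (hypothetical ID): initProvider suits create-only input,
+ # since its fields are merged into ForProvider at creation and then added to
+ # ignore_changes. The restore block is a natural fit, because it only applies
+ # when the cluster is created from a backup:
+ #
+ #   spec:
+ #     initProvider:
+ #       restore:
+ #         - backupId: example-backup-id    # hypothetical; taken from the MySQL backup list
+ #     forProvider:
+ #       name: example-mysql
+ #       # ...remaining required parameters as in the earlier sketch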
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the MySQL cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address. It can be changed on the fly only when name is + set. + type: boolean + backupPriority: + description: Host backup priority. Value is between 0 and + 100, default is 0. + type: number + name: + description: Host state name. It should be set for all hosts + or unset for all hosts. This field can be used by another + host, to select which host will be its replication source. + Please refer to replication_source_name parameter. + type: string + priority: + description: Host master promotion priority. Value is between + 0 and 100, default is 0. + type: number + replicationSourceName: + description: Host replication source name points to host's + name from which this host should replicate. When not set + then host in HA group. It works only when name is set. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the MySQL host + will be created. + type: string + type: object + type: array + hostGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the MySQL + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the MySQL cluster. The structure + is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 0 and 23. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + mysqlConfig: + additionalProperties: + type: string + description: MySQL cluster config. Detail info in "MySQL config" + section (documented below). + type: object + x-kubernetes-map-type: granular + name: + description: Name of the MySQL cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the MySQL cluster uses. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + performanceDiagnostics: + description: Cluster performance diagnostics settings. The structure + is documented below. YC Documentation + items: + properties: + enabled: + description: Enable performance diagnostics + type: boolean + sessionsSamplingInterval: + description: Interval (in seconds) for my_stat_activity + sampling Acceptable values are 1 to 86400, inclusive. + type: number + statementsSamplingInterval: + description: Interval (in seconds) for my_stat_statements + sampling Acceptable values are 1 to 86400, inclusive. + type: number + type: object + type: array + resources: + description: Resources allocated to hosts of the MySQL cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MySQL + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MySQL hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. How to get a list of MySQL backups. + type: string + time: + description: 'Timestamp of the moment to which the MySQL + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. 
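+ # --- Editor's sketch (hypothetical labels): populating securityGroupIds by
+ # label selection instead of listing IDs, per the selector schema above; the
+ # same shape works under initProvider:
+ #
+ #   spec:
+ #     forProvider:
+ #       securityGroupIdsSelector:
+ #         matchLabels:
+ #           team: db-platform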
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_mysql_user. + items: + properties: + authenticationPlugin: + description: 'Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, + CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 + MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)' + type: string + connectionLimits: + description: User's connection limits. The structure is + documented below. If the attribute is not specified there + will be no changes. + items: + properties: + maxConnectionsPerHour: + description: Max connections per hour. + type: number + maxQuestionsPerHour: + description: Max questions per hour. + type: number + maxUpdatesPerHour: + description: Max updates per hour. + type: number + maxUserConnections: + description: Max user connections. + type: number + type: object + type: array + globalPermissions: + description: |- + List user's global permissions + Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: 'List user''s roles in the database. + Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, + CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE.' + items: + type: string + type: array + type: object + type: array + required: + - passwordSecretRef + type: object + type: array + version: + description: 'Version of the MySQL cluster. (allowed versions + are: 5.7, 8.0)' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
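+ # --- Editor's sketch (hypothetical ID): adopting an existing cluster in
+ # observe-only mode. With only Observe in managementPolicies, the validation
+ # rules below do not demand the usual creation parameters, and the external
+ # resource is matched via the crossplane.io/external-name annotation:
+ #
+ #   metadata:
+ #     annotations:
+ #       crossplane.io/external-name: example-cluster-id
+ #   spec:
+ #     managementPolicies: ["Observe"]
+ #     forProvider: {}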
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.host is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.host) + || (has(self.initProvider) && has(self.initProvider.host))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.resources is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resources) + || (has(self.initProvider) && has(self.initProvider.resources))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: MySQLClusterStatus defines the observed state of MySQLCluster. + properties: + atProvider: + properties: + access: + description: Access policy to the MySQL cluster. The structure + is documented below. 
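+ # --- Editor's sketch (hypothetical names): writing the cluster's connection
+ # details to a namespaced Secret via writeConnectionSecretToRef, as described
+ # above (publishConnectionDetailsTo is the planned replacement):
+ #
+ #   spec:
+ #     writeConnectionSecretToRef:
+ #       name: example-mysql-conn
+ #       namespace: crossplane-system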
+ items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + webSql: + description: Allows access for SQL queries in the management + console. + type: boolean + type: object + type: array + allowRegenerationHost: + description: A host of the MySQL cluster. The structure is documented + below. + type: boolean + backupRetainPeriodDays: + description: The period in days during which backups are stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC. The structure + is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + createdAt: + description: Creation timestamp of the cluster. + type: string + database: + description: (Deprecated) To manage databases, please switch to + using a separate resource type yandex_mdb_mysql_databases. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the MySQL cluster. + type: string + environment: + description: Deployment environment of the MySQL cluster. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + health: + description: Aggregated health of the cluster. + type: string + host: + description: A host of the MySQL cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address. It can be changed on the fly only when name is + set. + type: boolean + backupPriority: + description: Host backup priority. Value is between 0 and + 100, default is 0. + type: number + fqdn: + description: (Computed) The fully qualified domain name + of the host. + type: string + name: + description: Host state name. It should be set for all hosts + or unset for all hosts. This field can be used by another + host, to select which host will be its replication source. + Please refer to replication_source_name parameter. + type: string + priority: + description: Host master promotion priority. Value is between + 0 and 100, default is 0. + type: number + replicationSource: + description: (Computed) Host replication source (fqdn), + when replication_source is empty then host is in HA group. + type: string + replicationSourceName: + description: Host replication source name points to host's + name from which this host should replicate. When not set + then host in HA group. It works only when name is set. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + zone: + description: The availability zone where the MySQL host + will be created. + type: string + type: object + type: array + hostGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the MySQL + cluster. 
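+ # --- Editor's sketch (hypothetical observed values): once synced, the
+ # controller fills status.atProvider with computed fields such as the host
+ # fqdn and aggregated health described above:
+ #
+ #   status:
+ #     atProvider:
+ #       id: example-cluster-id
+ #       health: ALIVE    # assumed value; the schema only says string
+ #       host:
+ #         - zone: ru-central1-a
+ #           fqdn: example-host.mdb.yandexcloud.net    # hypothetical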
+ type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the MySQL cluster. The structure + is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 0 and 23. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + mysqlConfig: + additionalProperties: + type: string + description: MySQL cluster config. Detail info in "MySQL config" + section (documented below). + type: object + x-kubernetes-map-type: granular + name: + description: Name of the MySQL cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the MySQL cluster uses. + type: string + performanceDiagnostics: + description: Cluster performance diagnostics settings. The structure + is documented below. YC Documentation + items: + properties: + enabled: + description: Enable performance diagnostics + type: boolean + sessionsSamplingInterval: + description: Interval (in seconds) for my_stat_activity + sampling Acceptable values are 1 to 86400, inclusive. + type: number + statementsSamplingInterval: + description: Interval (in seconds) for my_stat_statements + sampling Acceptable values are 1 to 86400, inclusive. + type: number + type: object + type: array + resources: + description: Resources allocated to hosts of the MySQL cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a MySQL + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of MySQL hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. How to get a list of MySQL backups. + type: string + time: + description: 'Timestamp of the moment to which the MySQL + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + status: + description: Status of the cluster. + type: string + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_mysql_user. + items: + properties: + authenticationPlugin: + description: 'Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, + CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 + MYSQL_NATIVE_PASSWORD, SHA256_PASSWORD)' + type: string + connectionLimits: + description: User's connection limits. The structure is + documented below. If the attribute is not specified there + will be no changes. + items: + properties: + maxConnectionsPerHour: + description: Max connections per hour. + type: number + maxQuestionsPerHour: + description: Max questions per hour. + type: number + maxUpdatesPerHour: + description: Max updates per hour. 
+ type: number + maxUserConnections: + description: Max user connections. + type: number + type: object + type: array + globalPermissions: + description: |- + List user's global permissions + Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS for clear list use empty list. If the attribute is not specified there will be no changes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: 'List user''s roles in the database. + Allowed roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, + CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE.' + items: + type: string + type: array + type: object + type: array + type: object + type: array + version: + description: 'Version of the MySQL cluster. (allowed versions + are: 5.7, 8.0)' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
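+ # --- Editor's sketch (hypothetical timestamps and assumed reason strings):
+ # the Ready and Synced values surfaced in the printer columns are stored in
+ # status.conditions with the shape defined below:
+ #
+ #   status:
+ #     conditions:
+ #       - type: Ready
+ #         status: "True"
+ #         reason: Available
+ #         lastTransitionTime: "2024-01-01T00:00:00Z"
+ #       - type: Synced
+ #         status: "True"
+ #         reason: ReconcileSuccess
+ #         lastTransitionTime: "2024-01-01T00:00:00Z"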
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqldatabases.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqldatabases.yaml new file mode 100644 index 0000000..b766cd9 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqldatabases.yaml @@ -0,0 +1,494 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mysqldatabases.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: MySQLDatabase + listKind: MySQLDatabaseList + plural: mysqldatabases + singular: mysqldatabase + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MySQLDatabase is the Schema for the MySQLDatabases API. Manages + a MySQL database within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MySQLDatabaseSpec defines the desired state of MySQLDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a MySQLCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
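+ # --- Editor's sketch (hypothetical names): a MySQLDatabase that resolves its
+ # clusterId from a MySQLCluster managed in the same control plane — this is
+ # the replacement for the cluster's deprecated inline database block:
+ #
+ #   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: MySQLDatabase
+ #   metadata:
+ #     name: example-db
+ #   spec:
+ #     forProvider:
+ #       name: example-db
+ #       clusterIdRef:
+ #         name: example-mysql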
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MySQLCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the database. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a MySQLCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MySQLCluster to populate clusterId. 
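+ # --- Editor's sketch: the selector alternative. With matchControllerRef: true
+ # the database picks a MySQLCluster carrying the same controller reference
+ # (typically the composite resource that created both in a Composition), so no
+ # name has to be wired through:
+ #
+ #   spec:
+ #     forProvider:
+ #       clusterIdSelector:
+ #         matchControllerRef: true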
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the database. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: MySQLDatabaseStatus defines the observed state of MySQLDatabase. + properties: + atProvider: + properties: + clusterId: + type: string + id: + type: string + name: + description: The name of the database. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention.
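# For orientation, a minimal MySQLDatabase manifest that this schema admits —
# an illustrative sketch only, not part of the generated CRD; the object,
# cluster, and database names below are hypothetical:
#
#   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
#   kind: MySQLDatabase
#   metadata:
#     name: example-database
#   spec:
#     forProvider:
#       name: appdb                     # database name inside the cluster
#       clusterIdRef:
#         name: example-mysql-cluster   # resolves clusterId from a MySQLCluster object
#     providerConfigRef:
#       name: default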
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqlusers.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqlusers.yaml new file mode 100644 index 0000000..22fc86c --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mysqlusers.yaml @@ -0,0 +1,835 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mysqlusers.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: MySQLUser + listKind: MySQLUserList + plural: mysqlusers + singular: mysqluser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: MySQLUser is the Schema for the MySQLUsers API. Manages a MySQL + user within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MySQLUserSpec defines the desired state of MySQLUser + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationPlugin: + description: 'Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, + CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, + SHA256_PASSWORD)' + type: string + clusterId: + type: string + clusterIdRef: + description: Reference to a MySQLCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MySQLCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionLimits: + description: User's connection limits. The structure is documented + below. If the attribute is not specified there will be no changes. + items: + properties: + maxConnectionsPerHour: + description: Max connections per hour. + type: number + maxQuestionsPerHour: + description: Max questions per hour. + type: number + maxUpdatesPerHour: + description: Max updates per hour. + type: number + maxUserConnections: + description: Max user connections. + type: number + type: object + type: array + globalPermissions: + description: |- + List of the user's global permissions. + Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS. To clear the list, use an empty list. If the attribute is not specified there will be no changes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + databaseNameRef: + description: Reference to a MySQLDatabase in mdb to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a MySQLDatabase in mdb to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: 'List user''s roles in the database. Allowed + roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, + CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE.' + items: + type: string + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + authenticationPlugin: + description: 'Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, + CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, + SHA256_PASSWORD)' + type: string + clusterId: + type: string + clusterIdRef: + description: Reference to a MySQLCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a MySQLCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionLimits: + description: User's connection limits. The structure is documented + below. If the attribute is not specified there will be no changes. + items: + properties: + maxConnectionsPerHour: + description: Max connections per hour. + type: number + maxQuestionsPerHour: + description: Max questions per hour. + type: number + maxUpdatesPerHour: + description: Max updates per hour. + type: number + maxUserConnections: + description: Max user connections. + type: number + type: object + type: array + globalPermissions: + description: |- + List of the user's global permissions. + Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS. To clear the list, use an empty list. If the attribute is not specified there will be no changes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + databaseNameRef: + description: Reference to a MySQLDatabase in mdb to populate + databaseName. + properties: + name: + description: Name of the referenced object.
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a MySQLDatabase in mdb to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: 'List user''s roles in the database. Allowed + roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, + CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE.' + items: + type: string + type: array + type: object + type: array + required: + - passwordSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for other store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret.
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.passwordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' + status: + description: MySQLUserStatus defines the observed state of MySQLUser. + properties: + atProvider: + properties: + authenticationPlugin: + description: 'Authentication plugin. Allowed values: MYSQL_NATIVE_PASSWORD, + CACHING_SHA2_PASSWORD, SHA256_PASSWORD (for version 5.7 MYSQL_NATIVE_PASSWORD, + SHA256_PASSWORD)' + type: string + clusterId: + type: string + connectionLimits: + description: User's connection limits. The structure is documented + below. If the attribute is not specified there will be no changes. + items: + properties: + maxConnectionsPerHour: + description: Max connections per hour. + type: number + maxQuestionsPerHour: + description: Max questions per hour. + type: number + maxUpdatesPerHour: + description: Max updates per hour. + type: number + maxUserConnections: + description: Max user connections. + type: number + type: object + type: array + globalPermissions: + description: |- + List of the user's global permissions. + Allowed permissions: REPLICATION_CLIENT, REPLICATION_SLAVE, PROCESS. To clear the list, use an empty list. If the attribute is not specified there will be no changes. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + type: string + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: 'List user''s roles in the database. Allowed + roles: ALL,ALTER,ALTER_ROUTINE,CREATE,CREATE_ROUTINE,CREATE_TEMPORARY_TABLES, + CREATE_VIEW,DELETE,DROP,EVENT,EXECUTE,INDEX,INSERT,LOCK_TABLES,SELECT,SHOW_VIEW,TRIGGER,UPDATE.' + items: + type: string + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another.
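# For orientation, a minimal MySQLUser manifest that this schema admits — an
# illustrative sketch only; the object, cluster, database, and secret names
# are hypothetical, and the referenced Secret must already hold the password:
#
#   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
#   kind: MySQLUser
#   metadata:
#     name: example-user
#   spec:
#     forProvider:
#       name: app
#       clusterIdRef:
#         name: example-mysql-cluster
#       passwordSecretRef:              # required by the CEL validation above
#         name: example-user-password
#         namespace: default
#         key: password
#       permission:
#         - databaseNameRef:
#             name: example-database    # a MySQLDatabase object
#           roles:
#             - ALL
#     providerConfigRef:
#       name: default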
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqlclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqlclusters.yaml new file mode 100644 index 0000000..08c0b4a --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqlclusters.yaml @@ -0,0 +1,1967 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: postgresqlclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PostgresqlCluster + listKind: PostgresqlClusterList + plural: postgresqlclusters + singular: postgresqlcluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PostgresqlCluster is the Schema for the PostgresqlClusters API. + Manages a PostgreSQL cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresqlClusterSpec defines the desired state of PostgresqlCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + config: + description: Configuration of the PostgreSQL cluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the PostgreSQL cluster. The + structure is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + serverless: + description: Allow access for connection to managed + databases from functions + type: boolean + webSql: + description: Allow access for SQL queries in the management + console + type: boolean + type: object + type: array + autofailover: + description: Configuration setting which enables/disables + autofailover in cluster. + type: boolean + backupRetainPeriodDays: + description: The period in days during which backups are + stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC + timezone. The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started + (UTC). + type: number + minutes: + description: The minute at which backup will be started + (UTC). + type: number + type: object + type: array + diskSizeAutoscaling: + description: Cluster disk size autoscaling settings. The + structure is documented below. + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling + (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage + (percent). + type: number + type: object + type: array + performanceDiagnostics: + description: Cluster performance diagnostics settings. The + structure is documented below. YC Documentation + items: + properties: + enabled: + description: Enable performance diagnostics + type: boolean + sessionsSamplingInterval: + description: Interval (in seconds) for pg_stat_activity + sampling Acceptable values are 1 to 86400, inclusive. + type: number + statementsSamplingInterval: + description: Interval (in seconds) for pg_stat_statements + sampling Acceptable values are 1 to 86400, inclusive. + type: number + type: object + type: array + poolerConfig: + description: Configuration of the connection pooler. The + structure is documented below. + items: + properties: + poolDiscard: + description: Setting pool_discard parameter in Odyssey. + type: boolean + poolingMode: + description: Mode that the connection pooler is working + in. 
See descriptions of all modes in the [documentation + for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + type: string + type: object + type: array + postgresqlConfig: + additionalProperties: + type: string + description: PostgreSQL cluster config. Detailed info in "postgresql + config" section (documented below). + type: object + x-kubernetes-map-type: granular + resources: + description: Resources allocated to hosts of the PostgreSQL + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a + PostgreSQL host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of PostgreSQL hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + version: + description: 'Version of the PostgreSQL cluster. (allowed + versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, + 14, 14-1c, 15, 15-1c, 16)' + type: string + type: object + type: array + database: + description: (Deprecated) To manage databases, please switch to + using a separate resource type yandex_mdb_postgresql_database. + items: + properties: + extension: + items: + properties: + name: + description: Name of the database extension. + type: string + version: + description: Version of the database extension. + type: string + type: object + type: array + lcCollate: + type: string + lcType: + type: string + name: + description: Name of the database. + type: string + owner: + type: string + templateDb: + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the PostgreSQL cluster. + type: string + environment: + description: Deployment environment of the PostgreSQL cluster. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is unset, the default provider folder_id is used for create. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected.
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the PostgreSQL cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. It can be changed on the fly only + when name is set. + type: boolean + name: + description: Host state name. It should be set for all hosts + or unset for all hosts. This field can be used by another + host, to select which host will be its replication source. + Please see replication_source_name parameter. Also, this + field is used to select which host will be selected as + a master host. Please see host_master_name parameter. + type: string + priority: + description: Host priority in HA group. It works only when + name is set. + type: number + replicationSourceName: + description: Host replication source name points to host's + name from which this host should replicate. When not set + then host in HA group. It works only when name is set. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the PostgreSQL + host will be created. + type: string + type: object + type: array + hostGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + hostMasterName: + description: It sets name of master host. It works only when host.name + is set. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the PostgreSQL + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the PostgreSQL cluster. The + structure is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 1 and 24. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the PostgreSQL cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the PostgreSQL cluster + belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. How to get a list of PostgreSQL + backups. + type: string + time: + description: 'Timestamp of the moment to which the PostgreSQL + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + timeInclusive: + description: |- + Flag that indicates whether a database should be restored to the first backup point available just after the timestamp specified in the [time] field instead of just before. + Possible values: + type: boolean + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_postgresql_user. + items: + properties: + connLimit: + type: number + grants: + items: + type: string + type: array + login: + type: boolean + name: + description: Name of the user. + type: string + passwordSecretRef: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + items: + properties: + databaseName: + description: Name of the database that the permission + grants access to. + type: string + type: object + type: array + settings: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + config: + description: Configuration of the PostgreSQL cluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the PostgreSQL cluster. The + structure is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + serverless: + description: Allow access for connection to managed + databases from functions + type: boolean + webSql: + description: Allow access for SQL queries in the management + console + type: boolean + type: object + type: array + autofailover: + description: Configuration setting which enables/disables + autofailover in cluster. + type: boolean + backupRetainPeriodDays: + description: The period in days during which backups are + stored. + type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC + timezone. The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started + (UTC). + type: number + minutes: + description: The minute at which backup will be started + (UTC). + type: number + type: object + type: array + diskSizeAutoscaling: + description: Cluster disk size autoscaling settings. The + structure is documented below.
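# For orientation, a minimal PostgresqlCluster manifest that this schema
# admits — an illustrative sketch only; the names, availability zone, and
# resource preset below are hypothetical placeholders:
#
#   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
#   kind: PostgresqlCluster
#   metadata:
#     name: example-postgresql-cluster
#   spec:
#     forProvider:
#       name: example-cluster
#       environment: PRODUCTION
#       networkIdRef:
#         name: example-network          # a vpc Network object
#       config:
#         - version: "16"
#           resources:
#             - resourcePresetId: s2.micro
#               diskTypeId: network-ssd
#               diskSize: 16
#       host:
#         - zone: ru-central1-a
#           subnetIdRef:
#             name: example-subnet       # a vpc Subnet object
#     providerConfigRef:
#       name: default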
+ items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling + (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage + (percent). + type: number + type: object + type: array + performanceDiagnostics: + description: Cluster performance diagnostics settings. The + structure is documented below. YC Documentation + items: + properties: + enabled: + description: Enable performance diagnostics + type: boolean + sessionsSamplingInterval: + description: Interval (in seconds) for pg_stat_activity + sampling Acceptable values are 1 to 86400, inclusive. + type: number + statementsSamplingInterval: + description: Interval (in seconds) for pg_stat_statements + sampling Acceptable values are 1 to 86400, inclusive. + type: number + type: object + type: array + poolerConfig: + description: Configuration of the connection pooler. The + structure is documented below. + items: + properties: + poolDiscard: + description: Setting pool_discard parameter in Odyssey. + type: boolean + poolingMode: + description: Mode that the connection pooler is working + in. See descriptions of all modes in the [documentation + for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + type: string + type: object + type: array + postgresqlConfig: + additionalProperties: + type: string + description: PostgreSQL cluster config. Detailed info in "postgresql + config" section (documented below). + type: object + x-kubernetes-map-type: granular + resources: + description: Resources allocated to hosts of the PostgreSQL + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a + PostgreSQL host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of PostgreSQL hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + version: + description: 'Version of the PostgreSQL cluster. (allowed + versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, + 14, 14-1c, 15, 15-1c, 16)' + type: string + type: object + type: array + database: + description: (Deprecated) To manage databases, please switch to + using a separate resource type yandex_mdb_postgresql_database. + items: + properties: + extension: + items: + properties: + name: + description: Name of the database extension. + type: string + version: + description: Version of the database extension. + type: string + type: object + type: array + lcCollate: + type: string + lcType: + type: string + name: + description: Name of the database. + type: string + owner: + type: string + templateDb: + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the PostgreSQL cluster. + type: string + environment: + description: Deployment environment of the PostgreSQL cluster. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is unset, the default provider folder_id is used for create.
+ type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the PostgreSQL cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. It can be changed on the fly only + when name is set. + type: boolean + name: + description: Host state name. It should be set for all hosts + or unset for all hosts. This field can be used by another + host, to select which host will be its replication source. + Please see replication_source_name parameter. Also, this + field is used to select which host will be selected as + a master host. Please see host_master_name parameter. + type: string + priority: + description: Host priority in HA group. It works only when + name is set. + type: number + replicationSourceName: + description: Host replication source name points to host's + name from which this host should replicate. When not set + then host in HA group. It works only when name is set. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the PostgreSQL + host will be created. + type: string + type: object + type: array + hostGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + hostMasterName: + description: It sets name of master host. It works only when host.name + is set. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the PostgreSQL + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the PostgreSQL cluster. The + structure is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 1 and 24. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the PostgreSQL cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the PostgreSQL cluster + belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. 
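+ # Example (hypothetical) of the maintenanceWindow block documented above; for
+ # type ANYTIME the day and hour fields are omitted:
+ #   maintenanceWindow:
+ #     - type: WEEKLY
+ #       day: SAT
+ #       hour: 3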
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. + items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. See the Yandex Cloud documentation for + how to get a list of PostgreSQL backups. + type: string + time: + description: 'Timestamp of the moment to which the PostgreSQL + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + timeInclusive: + description: |- + Flag that indicates whether a database should be restored to the first backup point available just after the timestamp specified in the [time] field instead of just before. + Possible values: true and false (default is false). + type: boolean + type: object + type: array + securityGroupIds: + description: A set of IDs of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_postgresql_user. + items: + properties: + connLimit: + type: number + grants: + items: + type: string + type: array + login: + type: boolean + name: + description: The name of the user. + type: string + passwordSecretRef: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + type: object + type: array + settings: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + required: + - passwordSecretRef + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.config is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.config) + || (has(self.initProvider) && has(self.initProvider.config))' + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.host is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.host) + || (has(self.initProvider) && has(self.initProvider.host))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: PostgresqlClusterStatus defines the observed state of PostgresqlCluster. + properties: + atProvider: + properties: + config: + description: Configuration of the PostgreSQL cluster. The structure + is documented below. + items: + properties: + access: + description: Access policy to the PostgreSQL cluster. The + structure is documented below. + items: + properties: + dataLens: + description: Allow access for Yandex DataLens. + type: boolean + dataTransfer: + description: Allow access for DataTransfer + type: boolean + serverless: + description: Allow access for connection to managed + databases from functions + type: boolean + webSql: + description: Allow access for SQL queries in the management + console + type: boolean + type: object + type: array + autofailover: + description: Configuration setting which enables/disables + autofailover in cluster. + type: boolean + backupRetainPeriodDays: + description: The period in days during which backups are + stored. 
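+ # The CEL rules above implement upjet's conditional-required pattern: config,
+ # environment, host and name must be present (in forProvider or initProvider)
+ # only when Crossplane is allowed to Create or Update the resource. A
+ # hypothetical observe-only import can therefore omit them entirely:
+ #   spec:
+ #     managementPolicies: ["Observe"]
+ #     forProvider: {}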
+ type: number + backupWindowStart: + description: Time to start the daily backup, in the UTC + timezone. The structure is documented below. + items: + properties: + hours: + description: The hour at which backup will be started + (UTC). + type: number + minutes: + description: The minute at which backup will be started + (UTC). + type: number + type: object + type: array + diskSizeAutoscaling: + description: Cluster disk size autoscaling settings. The + structure is documented below. + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling + (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage + (percent). + type: number + type: object + type: array + performanceDiagnostics: + description: Cluster performance diagnostics settings. The + structure is documented below. See the YC documentation. + items: + properties: + enabled: + description: Enable performance diagnostics + type: boolean + sessionsSamplingInterval: + description: Interval (in seconds) for pg_stat_activity + sampling. Acceptable values are 1 to 86400, inclusive. + type: number + statementsSamplingInterval: + description: Interval (in seconds) for pg_stat_statements + sampling. Acceptable values are 1 to 86400, inclusive. + type: number + type: object + type: array + poolerConfig: + description: Configuration of the connection pooler. The + structure is documented below. + items: + properties: + poolDiscard: + description: Setting pool_discard parameter in Odyssey. + type: boolean + poolingMode: + description: Mode that the connection pooler is working + in. See descriptions of all modes in the [documentation + for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string). + type: string + type: object + type: array + postgresqlConfig: + additionalProperties: + type: string + description: PostgreSQL cluster config. Detailed info in the "postgresql + config" section (documented below). + type: object + x-kubernetes-map-type: granular + resources: + description: Resources allocated to hosts of the PostgreSQL + cluster. The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a + PostgreSQL host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of PostgreSQL hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + version: + description: 'Version of the PostgreSQL cluster. (allowed + versions are: 10, 10-1c, 11, 11-1c, 12, 12-1c, 13, 13-1c, + 14, 14-1c, 15, 15-1c, 16)' + type: string + type: object + type: array + createdAt: + description: Timestamp of cluster creation. + type: string + database: + description: (Deprecated) To manage databases, please switch to + using a separate resource type yandex_mdb_postgresql_database. + items: + properties: + extension: + items: + properties: + name: + description: Name of the database extension. + type: string + version: + description: Version of the extension. + type: string + type: object + type: array + lcCollate: + type: string + lcType: + type: string + name: + description: The name of the database.
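+ # Hypothetical values for the backup window and disk autoscaling blocks with
+ # the shapes documented above (the same shapes appear in spec.forProvider):
+ #   backupWindowStart:
+ #     - hours: 2
+ #       minutes: 30
+ #   diskSizeAutoscaling:
+ #     - diskSizeLimit: 100
+ #       plannedUsageThreshold: 80
+ #       emergencyUsageThreshold: 90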
+ type: string + owner: + type: string + templateDb: + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the PostgreSQL cluster. + type: string + environment: + description: Deployment environment of the PostgreSQL cluster. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is unset, the default provider folder_id is used for create. + type: string + health: + description: Aggregated health of the cluster. + type: string + host: + description: A host of the PostgreSQL cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. It can be changed on the fly only + when name is set. + type: boolean + fqdn: + description: (Computed) The fully qualified domain name + of the host. + type: string + name: + description: Host state name. It should be set for all hosts + or unset for all hosts. This field can be used by another + host, to select which host will be its replication source. + Please see replication_source_name parameter. Also, this + field is used to select which host will be selected as + a master host. Please see host_master_name parameter. + type: string + priority: + description: Host priority in HA group. It works only when + name is set. + type: number + replicationSource: + description: (Computed) Host replication source (fqdn), + when replication_source is empty then host is in HA group. + type: string + replicationSourceName: + description: Host replication source name points to host's + name from which this host should replicate. When not set + then host in HA group. It works only when name is set. + type: string + role: + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + zone: + description: The availability zone where the PostgreSQL + host will be created. + type: string + type: object + type: array + hostGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + hostMasterName: + description: It sets name of master host. It works only when host.name + is set. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the PostgreSQL + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + description: Maintenance policy of the PostgreSQL cluster. The + structure is documented below. + items: + properties: + day: + description: 'Day of the week (in DDD format). Allowed values: + "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"' + type: string + hour: + description: Hour of the day in UTC (in HH format). Allowed + value is between 1 and 24. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the PostgreSQL cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the PostgreSQL cluster + belongs. + type: string + restore: + description: The cluster will be created from the specified backup. + The structure is documented below. 
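+ # The fields above are populated by the controller after reconciliation; a
+ # quick way to read the observed state (assuming the kind's singular name is
+ # postgresqlcluster, by analogy with the CRDs below):
+ #   kubectl get postgresqlcluster my-cluster \
+ #     -o jsonpath='{.status.atProvider.health} {.status.atProvider.status}'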
+ items: + properties: + backupId: + description: Backup ID. The cluster will be created from + the specified backup. See the Yandex Cloud documentation for + how to get a list of PostgreSQL backups. + type: string + time: + description: 'Timestamp of the moment to which the PostgreSQL + cluster should be restored. (Format: "2006-01-02T15:04:05" + - UTC). When not set, current time is used.' + type: string + timeInclusive: + description: |- + Flag that indicates whether a database should be restored to the first backup point available just after the timestamp specified in the [time] field instead of just before. + Possible values: true and false (default is false). + type: boolean + type: object + type: array + securityGroupIds: + description: A set of IDs of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + status: + description: Status of the cluster. + type: string + user: + description: (Deprecated) To manage users, please switch to using + a separate resource type yandex_mdb_postgresql_user. + items: + properties: + connLimit: + type: number + grants: + items: + type: string + type: array + login: + type: boolean + name: + description: The name of the user. + type: string + permission: + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + type: object + type: array + settings: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqldatabases.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqldatabases.yaml new file mode 100644 index 0000000..4f0cf12 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqldatabases.yaml @@ -0,0 +1,745 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: postgresqldatabases.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PostgresqlDatabase + listKind: PostgresqlDatabaseList + plural: postgresqldatabases + singular: postgresqldatabase + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PostgresqlDatabase is the Schema for the PostgresqlDatabases + API. Manages a PostgreSQL database within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresqlDatabaseSpec defines the desired state of PostgresqlDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a PostgresqlCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a PostgresqlCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deletionProtection: + description: Inhibits deletion of the database. Can either be + true, false or unspecified. + type: string + extension: + description: Set of database extensions. The structure is documented + below + items: + properties: + name: + description: Name of the database extension. For more information + on available extensions see the official documentation. + type: string + version: + description: Version of the extension. + type: string + type: object + type: array + lcCollate: + description: POSIX locale for string sorting order. Forbidden + to change in an existing database. + type: string + lcType: + description: POSIX locale for character classification. Forbidden + to change in an existing database. + type: string + name: + description: The name of the database. + type: string + owner: + description: Name of the user assigned as the owner of the database. + Forbidden to change in an existing database. + type: string + ownerRef: + description: Reference to a PostgresqlUser in mdb to populate + owner. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ownerSelector: + description: Selector for a PostgresqlUser in mdb to populate + owner. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + templateDb: + description: Name of the template database. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a PostgresqlCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a PostgresqlCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
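+ # A hypothetical PostgresqlDatabase manifest exercising the spec above (all
+ # names are invented; clusterIdRef resolves a PostgresqlCluster and ownerRef a
+ # PostgresqlUser managed in the same control plane):
+ #   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: PostgresqlDatabase
+ #   metadata:
+ #     name: appdb
+ #   spec:
+ #     forProvider:
+ #       name: appdb
+ #       clusterIdRef:
+ #         name: my-cluster
+ #       ownerRef:
+ #         name: app-user
+ #       lcCollate: en_US.UTF-8
+ #       lcType: en_US.UTF-8
+ #       extension:
+ #         - name: pg_trgm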
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deletionProtection: + description: Inhibits deletion of the database. Can either be + true, false or unspecified. + type: string + extension: + description: Set of database extensions. The structure is documented + below + items: + properties: + name: + description: Name of the database extension. For more information + on available extensions see the official documentation. + type: string + version: + description: Version of the extension. + type: string + type: object + type: array + lcCollate: + description: POSIX locale for string sorting order. Forbidden + to change in an existing database. + type: string + lcType: + description: POSIX locale for character classification. Forbidden + to change in an existing database. + type: string + name: + description: The name of the database. + type: string + owner: + description: Name of the user assigned as the owner of the database. + Forbidden to change in an existing database. + type: string + ownerRef: + description: Reference to a PostgresqlUser in mdb to populate + owner. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ownerSelector: + description: Selector for a PostgresqlUser in mdb to populate + owner. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + templateDb: + description: Name of the template database. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
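+ # Hypothetical sketch of routing connection details of this resource to a
+ # namespaced Secret via the writeConnectionSecretToRef field documented below:
+ #   spec:
+ #     writeConnectionSecretToRef:
+ #       name: appdb-conn
+ #       namespace: default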
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: PostgresqlDatabaseStatus defines the observed state of PostgresqlDatabase. + properties: + atProvider: + properties: + clusterId: + type: string + deletionProtection: + description: Inhibits deletion of the database. Can either be + true, false or unspecified. + type: string + extension: + description: Set of database extensions. The structure is documented + below + items: + properties: + name: + description: Name of the database extension. For more information + on available extensions see the official documentation. + type: string + version: + description: Version of the extension. + type: string + type: object + type: array + id: + type: string + lcCollate: + description: POSIX locale for string sorting order. 
Forbidden + to change in an existing database. + type: string + lcType: + description: POSIX locale for character classification. Forbidden + to change in an existing database. + type: string + name: + description: The name of the database. + type: string + owner: + description: Name of the user assigned as the owner of the database. + Forbidden to change in an existing database. + type: string + templateDb: + description: Name of the template database. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqlusers.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqlusers.yaml new file mode 100644 index 0000000..2901f9d --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_postgresqlusers.yaml @@ -0,0 +1,635 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: postgresqlusers.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PostgresqlUser + listKind: PostgresqlUserList + plural: postgresqlusers + singular: postgresqluser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PostgresqlUser is the Schema for the PostgresqlUsers API. Manages + a PostgreSQL user within Yandex.Cloud. 
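+ # A hypothetical PostgresqlUser manifest matching this schema; the password is
+ # taken from an ordinary Kubernetes Secret (names and key are invented):
+ #   apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: PostgresqlUser
+ #   metadata:
+ #     name: app-user
+ #   spec:
+ #     forProvider:
+ #       name: app-user
+ #       clusterIdRef:
+ #         name: my-cluster
+ #       connLimit: 50
+ #       passwordSecretRef:
+ #         name: app-user-password
+ #         namespace: default
+ #         key: password
+ #       permission:
+ #         - databaseName: appdb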
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresqlUserSpec defines the desired state of PostgresqlUser + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a PostgresqlCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a PostgresqlCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connLimit: + description: The maximum number of connections per user. (Default + 50) + type: number + deletionProtection: + description: Inhibits deletion of the user. Can either be true, + false or unspecified. + type: string + grants: + description: List of the user's grants. + items: + type: string + type: array + login: + description: User's ability to login. + type: boolean + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + type: object + type: array + settings: + additionalProperties: + type: string + description: Map of user settings. List of settings is documented + below. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterId: + type: string + clusterIdRef: + description: Reference to a PostgresqlCluster to populate clusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterIdSelector: + description: Selector for a PostgresqlCluster to populate clusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connLimit: + description: The maximum number of connections per user. (Default + 50) + type: number + deletionProtection: + description: Inhibits deletion of the user. Can either be true, + false or unspecified. + type: string + grants: + description: List of the user's grants. + items: + type: string + type: array + login: + description: User's ability to login. + type: boolean + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + type: object + type: array + settings: + additionalProperties: + type: string + description: Map of user settings. List of settings is documented + below. + type: object + x-kubernetes-map-type: granular + required: + - passwordSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.passwordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' + status: + description: PostgresqlUserStatus defines the observed state of PostgresqlUser. + properties: + atProvider: + properties: + clusterId: + type: string + connLimit: + description: The maximum number of connections per user. (Default + 50) + type: number + deletionProtection: + description: Inhibits deletion of the user. Can either be true, + false or unspecified. + type: string + grants: + description: List of the user's grants. + items: + type: string + type: array + id: + type: string + login: + description: User's ability to login. + type: boolean + name: + description: The name of the user. + type: string + permission: + description: Set of permissions granted to the user. The structure + is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + type: object + type: array + settings: + additionalProperties: + type: string + description: Map of user settings. List of settings is documented + below. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_redisclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_redisclusters.yaml new file mode 100644 index 0000000..a980709 --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_redisclusters.yaml @@ -0,0 +1,1565 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: redisclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: RedisCluster + listKind: RedisClusterList + plural: redisclusters + singular: rediscluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RedisCluster is the Schema for the RedisClusters API. Manages + a Redis cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RedisClusterSpec defines the desired state of RedisCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + access: + description: Access policy to the Redis cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for DataLens. 
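# --- Editorial aside: the PostgresqlUser CRD ends above; the RedisCluster schema
# resumes below. A minimal end-to-end sketch against the PostgresqlUser schema:
# the referenced Secret satisfies passwordSecretRef, and forProvider.name plus
# passwordSecretRef satisfy the CRD's CEL validation rules. The apiVersion is
# assumed to match the sibling mdb CRDs in this diff; all names are hypothetical.
apiVersion: v1
kind: Secret
metadata:
  name: pg-user-password          # hypothetical; holds the user's password
  namespace: default
stringData:
  password: change-me             # placeholder value
---
apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: PostgresqlUser
metadata:
  name: app-user
spec:
  forProvider:
    name: app
    clusterIdRef:
      name: my-pg-cluster         # hypothetical PostgresqlCluster managed resource
    connLimit: 50                 # matches the schema's documented default
    login: true
    grants:
      - mdb_admin                 # hypothetical grant
    permission:
      - databaseName: appdb       # hypothetical database name
    passwordSecretRef:
      key: password
      name: pg-user-password
      namespace: default
  providerConfigRef:
    name: default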
Can be either true + or false. + type: boolean + webSql: + description: Allow access for Web SQL. Can be either true + or false. + type: boolean + type: object + type: array + announceHostnames: + description: Announce FQDN instead of IP address. + type: boolean + config: + description: Configuration of the Redis cluster. The structure + is documented below. + items: + properties: + clientOutputBufferLimitNormal: + description: Normal clients output buffer limits. See redis + config file. + type: string + clientOutputBufferLimitPubsub: + description: Pubsub clients output buffer limits. See redis + config file. + type: string + databases: + description: Number of databases (changing requires redis-server + restart). + type: number + maxmemoryPercent: + description: Redis maxmemory usage in percent. + type: number + maxmemoryPolicy: + description: Redis key eviction policy for a dataset that + reaches maximum memory. Can be any of those listed in the + official RedisDB documentation. + type: string + notifyKeyspaceEvents: + description: Select the events that Redis will notify among + a set of classes. + type: string + passwordSecretRef: + description: Password for the Redis cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + slowlogLogSlowerThan: + description: Log slow queries below this number in microseconds. + type: number + slowlogMaxLen: + description: Slow queries log length. + type: number + timeout: + description: Close the connection after a client is idle + for N seconds. + type: number + version: + description: Version of Redis (6.2). + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Redis cluster. + type: string + diskSizeAutoscaling: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + environment: + description: Deployment environment of the Redis cluster. Can + be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present.
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the Redis cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address or not. + type: boolean + replicaPriority: + description: Replica priority of a current replica (usable + for non-sharded only). + type: number + shardName: + description: The name of the shard to which the host belongs. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the Redis host + will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Redis + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Redis cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Redis cluster belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + persistenceMode: + description: Persistence mode. + type: string + resources: + description: Resources allocated to hosts of the Redis cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a host, + in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Redis hosts - environment + default is used if missing. + type: string + resourcePresetId: + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharded: + description: Redis Cluster mode enabled/disabled. Enables sharding + when cluster non-sharded. If cluster is sharded - disabling + is not allowed. 
+ type: boolean + tlsEnabled: + description: TLS support mode enabled/disabled. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + access: + description: Access policy to the Redis cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for DataLens. Can be either true + or false. + type: boolean + webSql: + description: Allow access for Web SQL. Can be either true + or false. + type: boolean + type: object + type: array + announceHostnames: + description: Announce FQDN instead of IP address. + type: boolean + config: + description: Configuration of the Redis cluster. The structure + is documented below. + items: + properties: + clientOutputBufferLimitNormal: + description: Normal clients output buffer limits. See redis + config file. + type: string + clientOutputBufferLimitPubsub: + description: Pubsub clients output buffer limits. See redis + config file. + type: string + databases: + description: Number of databases (changing requires redis-server + restart). + type: number + maxmemoryPercent: + description: Redis maxmemory usage in percent. + type: number + maxmemoryPolicy: + description: Redis key eviction policy for a dataset that + reaches maximum memory. Can be any of those listed in the + official RedisDB documentation. + type: string + notifyKeyspaceEvents: + description: Select the events that Redis will notify among + a set of classes. + type: string + passwordSecretRef: + description: Password for the Redis cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + slowlogLogSlowerThan: + description: Log slow queries below this number in microseconds. + type: number + slowlogMaxLen: + description: Slow queries log length. + type: number + timeout: + description: Close the connection after a client is idle + for N seconds. + type: number + version: + description: Version of Redis (6.2). + type: string + required: + - passwordSecretRef + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the Redis cluster. + type: string + diskSizeAutoscaling: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + environment: + description: Deployment environment of the Redis cluster. Can + be either PRESTABLE or PRODUCTION.
+ type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the Redis cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address or not. + type: boolean + replicaPriority: + description: Replica priority of a current replica (usable + for non-sharded only). + type: number + shardName: + description: The name of the shard to which the host belongs. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the Redis host + will be created. For more information see the official + documentation. + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Redis + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Redis cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Redis cluster belongs. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + persistenceMode: + description: Persistence mode. + type: string + resources: + description: Resources allocated to hosts of the Redis cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a host, + in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Redis hosts - environment + default is used if missing. + type: string + resourcePresetId: + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharded: + description: Redis Cluster mode enabled/disabled. Enables sharding + when cluster non-sharded. If cluster is sharded - disabling + is not allowed. + type: boolean + tlsEnabled: + description: TLS support mode enabled/disabled. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
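# --- Editorial aside: a hedged sketch of the connection-details fields documented
# above. Either block can be set on these managed resources, and per the
# descriptions both may be set independently; all names are hypothetical:
#
#   spec:
#     writeConnectionSecretToRef:
#       name: redis-conn          # plain namespaced Secret target
#       namespace: default
#     publishConnectionDetailsTo:
#       name: redis-conn          # secret-store-backed alternative
#       configRef:
#         name: default           # references a secret store config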
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.config is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.config) + || (has(self.initProvider) && has(self.initProvider.config))' + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.host is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.host) + || (has(self.initProvider) && has(self.initProvider.host))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.resources is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resources) + || (has(self.initProvider) && has(self.initProvider.resources))' + status: + description: RedisClusterStatus defines the observed state of RedisCluster. + properties: + atProvider: + properties: + access: + description: Access policy to the Redis cluster. The structure + is documented below. + items: + properties: + dataLens: + description: Allow access for DataLens. Can be either true + or false. + type: boolean + webSql: + description: Allow access for Web SQL. Can be either true + or false. + type: boolean + type: object + type: array + announceHostnames: + description: Announce FQDN instead of IP address. + type: boolean + config: + description: Configuration of the Redis cluster. The structure + is documented below. + items: + properties: + clientOutputBufferLimitNormal: + description: Normal clients output buffer limits. See redis + config file. + type: string + clientOutputBufferLimitPubsub: + description: Pubsub clients output buffer limits. See redis + config file. + type: string + databases: + description: Number of databases (changing requires redis-server + restart). + type: number + maxmemoryPercent: + description: Redis maxmemory usage in percent. + type: number + maxmemoryPolicy: + description: Redis key eviction policy for a dataset that + reaches maximum memory. Can be any of those listed in the + official RedisDB documentation. + type: string + notifyKeyspaceEvents: + description: Select the events that Redis will notify among + a set of classes. + type: string + slowlogLogSlowerThan: + description: Log slow queries below this number in microseconds. + type: number + slowlogMaxLen: + description: Slow queries log length. + type: number + timeout: + description: Close the connection after a client is idle + for N seconds. + type: number + version: + description: Version of Redis (6.2). + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the cluster. + type: string + deletionProtection: + description: Inhibits deletion of the cluster.
Can be either true + or false. + type: boolean + description: + description: Description of the Redis cluster. + type: string + diskSizeAutoscaling: + items: + properties: + diskSizeLimit: + description: Limit of disk size after autoscaling (GiB). + type: number + emergencyUsageThreshold: + description: Immediate autoscaling disk usage (percent). + type: number + plannedUsageThreshold: + description: Maintenance window autoscaling disk usage (percent). + type: number + type: object + type: array + environment: + description: Deployment environment of the Redis cluster. Can + be either PRESTABLE or PRODUCTION. + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + health: + description: Aggregated health of the cluster. Can be either ALIVE, + DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health + field of JSON representation in the official documentation. + type: string + host: + description: A host of the Redis cluster. The structure is documented + below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address or not. + type: boolean + fqdn: + description: The fully qualified domain name of the host. + type: string + replicaPriority: + description: Replica priority of a current replica (usable + for non-sharded only). + type: number + shardName: + description: The name of the shard to which the host belongs. + type: string + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + zone: + description: The availability zone where the Redis host + will be created. For more information see the official + documentation. + type: string + type: object + type: array + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Redis + cluster. + type: object + x-kubernetes-map-type: granular + maintenanceWindow: + items: + properties: + day: + description: 'Day of week for maintenance window if window + type is weekly. Possible values: MON, TUE, WED, THU, FRI, + SAT, SUN.' + type: string + hour: + description: Hour of day in UTC time zone (1-24) for maintenance + window if window type is weekly. + type: number + type: + description: Type of maintenance window. Can be either ANYTIME + or WEEKLY. A day and hour of window need to be specified + with weekly window. + type: string + type: object + type: array + name: + description: Name of the Redis cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the Redis cluster belongs. + type: string + persistenceMode: + description: Persistence mode. + type: string + resources: + description: Resources allocated to hosts of the Redis cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a host, + in gigabytes. + type: number + diskTypeId: + description: Type of the storage of Redis hosts - environment + default is used if missing. + type: string + resourcePresetId: + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sharded: + description: Redis Cluster mode enabled/disabled. 
Enables sharding + when cluster non-sharded. If cluster is sharded - disabling + is not allowed. + type: boolean + status: + description: Status of the cluster. Can be either CREATING, STARTING, + RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. + For more information see status field of JSON representation + in the official documentation. + type: string + tlsEnabled: + description: TLS support mode enabled/disabled. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_sqlserverclusters.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_sqlserverclusters.yaml new file mode 100644 index 0000000..877c44e --- /dev/null +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_sqlserverclusters.yaml @@ -0,0 +1,1452 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: sqlserverclusters.mdb.yandex-cloud.upjet.crossplane.io +spec: + group: mdb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SqlserverCluster + listKind: SqlserverClusterList + plural: sqlserverclusters + singular: sqlservercluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SqlserverCluster is the Schema for the SqlserverClusters API. + Manages a Microsoft SQLServer cluster within Yandex.Cloud. 
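# --- Editorial aside: the RedisCluster CRD ends above; the SqlserverCluster schema
# resumes below. A minimal RedisCluster sketch that satisfies the CEL validations
# above (config, environment, host, name, resources are all set). The resource
# preset, network, subnet, and Secret names are hypothetical:
apiVersion: mdb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: RedisCluster
metadata:
  name: example-redis
spec:
  forProvider:
    name: example-redis
    environment: PRESTABLE        # PRESTABLE or PRODUCTION
    networkIdRef:
      name: my-network            # hypothetical vpc Network managed resource
    config:
      - version: "6.2"
        passwordSecretRef:
          key: password
          name: redis-password    # hypothetical Secret holding the cluster password
          namespace: default
    resources:
      - resourcePresetId: hm1.nano   # hypothetical preset id; see Yandex Cloud docs
        diskSize: 16
    host:
      - zone: ru-central1-a
        subnetIdRef:
          name: my-subnet         # hypothetical vpc Subnet managed resource
  providerConfigRef:
    name: default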
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SqlserverClusterSpec defines the desired state of SqlserverCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backupWindowStart: + description: Time to start the daily backup, in the UTC. The structure + is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + database: + description: A database of the SQLServer cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the SQLServer cluster. + type: string + environment: + description: Deployment environment of the SQLServer cluster. + (PRODUCTION, PRESTABLE) + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the SQLServer cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Changing this parameter for an existing + host is not supported at the moment + type: boolean + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the SQLServer host + will be created. + type: string + type: object + type: array + hostGroupIds: + description: A list of IDs of the host groups hosting VMs of the + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the SQLServer + cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the SQLServer cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the SQLServer cluster + uses. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resources: + description: Resources allocated to hosts of the SQLServer cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a SQLServer + host, in gigabytes. 
+ type: number + diskTypeId: + description: Type of the storage of SQLServer hosts. + type: string + resourcePresetId: + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sqlcollation: + description: SQL Collation cluster will be created with. This + attribute cannot be changed when cluster is created! + type: string + sqlserverConfig: + additionalProperties: + type: string + description: SQLServer cluster config. Detail info in "SQLServer + config" section (documented below). + type: object + x-kubernetes-map-type: granular + user: + description: A user of the SQLServer cluster. The structure is + documented below. + items: + properties: + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. 
The + structure is documented below. + items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: 'List user''s roles in the database. + Allowed roles: OWNER, SECURITYADMIN, ACCESSADMIN, + BACKUPOPERATOR, DDLADMIN, DATAWRITER, DATAREADER, + DENYDATAWRITER, DENYDATAREADER.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + version: + description: Version of the SQLServer cluster. (2016sp2std, 2016sp2ent) + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + backupWindowStart: + description: Time to start the daily backup, in the UTC. The structure + is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + database: + description: A database of the SQLServer cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the SQLServer cluster. + type: string + environment: + description: Deployment environment of the SQLServer cluster. + (PRODUCTION, PRESTABLE) + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: A host of the SQLServer cluster. The structure is + documented below. + items: + properties: + assignPublicIp: + description: Sets whether the host should get a public IP + address on creation. Changing this parameter for an existing + host is not supported at the moment + type: boolean + subnetId: + description: The ID of the subnet, to which the host belongs. + The subnet must be a part of the network to which the + cluster belongs. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zone: + description: The availability zone where the SQLServer host + will be created. + type: string + type: object + type: array + hostGroupIds: + description: A list of IDs of the host groups hosting VMs of the + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the SQLServer + cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the SQLServer cluster. Provided by the client + when the cluster is created. + type: string + networkId: + description: ID of the network, to which the SQLServer cluster + uses. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resources: + description: Resources allocated to hosts of the SQLServer cluster. + The structure is documented below. + items: + properties: + diskSize: + description: Volume of the storage available to a SQLServer + host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of SQLServer hosts. 
+ type: string + resourcePresetId: + type: string + type: object + type: array + securityGroupIds: + description: A set of ids of security groups assigned to hosts + of the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdsRefs: + description: References to SecurityGroup in vpc to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdsSelector: + description: Selector for a list of SecurityGroup in vpc to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sqlcollation: + description: SQL Collation cluster will be created with. This + attribute cannot be changed when cluster is created! + type: string + sqlserverConfig: + additionalProperties: + type: string + description: SQLServer cluster config. Detail info in "SQLServer + config" section (documented below). + type: object + x-kubernetes-map-type: granular + user: + description: A user of the SQLServer cluster. The structure is + documented below. + items: + properties: + name: + description: The name of the user. + type: string + passwordSecretRef: + description: The password of the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + permission: + description: Set of permissions granted to the user. The + structure is documented below. 
+ items: + properties: + databaseName: + description: The name of the database that the permission + grants access to. + type: string + roles: + description: 'List user''s roles in the database. + Allowed roles: OWNER, SECURITYADMIN, ACCESSADMIN, + BACKUPOPERATOR, DDLADMIN, DATAWRITER, DATAREADER, + DENYDATAWRITER, DENYDATAREADER.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + required: + - passwordSecretRef + type: object + type: array + version: + description: Version of the SQLServer cluster. (2016sp2std, 2016sp2ent) + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.database is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.database) + || (has(self.initProvider) && has(self.initProvider.database))' + - message: spec.forProvider.environment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.environment) + || (has(self.initProvider) && has(self.initProvider.environment))' + - message: spec.forProvider.host is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.host) + || (has(self.initProvider) && has(self.initProvider.host))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.resources is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resources) + || (has(self.initProvider) && has(self.initProvider.resources))' + - message: spec.forProvider.user is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.user) + || (has(self.initProvider) && has(self.initProvider.user))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: SqlserverClusterStatus defines the observed state of SqlserverCluster. + properties: + atProvider: + properties: + backupWindowStart: + description: Time to start the daily backup, in the UTC. The structure + is documented below. + items: + properties: + hours: + description: The hour at which backup will be started. + type: number + minutes: + description: The minute at which backup will be started. + type: number + type: object + type: array + createdAt: + description: Creation timestamp of the cluster. + type: string + database: + description: A database of the SQLServer cluster. The structure + is documented below. + items: + properties: + name: + description: The name of the database. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either true + or false. + type: boolean + description: + description: Description of the SQLServer cluster. + type: string + environment: + description: Deployment environment of the SQLServer cluster. + (PRODUCTION, PRESTABLE) + type: string + folderId: + description: The ID of the folder that the resource belongs to. + If it is not provided, the default provider folder is used. + type: string + health: + description: Aggregated health of the cluster. + type: string + host: + description: A host of the SQLServer cluster. 
The structure is
+                      documented below.
+                    items:
+                      properties:
+                        assignPublicIp:
+                          description: Sets whether the host should get a public IP
+                            address on creation. Changing this parameter for an existing
+                            host is not supported at the moment.
+                          type: boolean
+                        fqdn:
+                          description: (Computed) The fully qualified domain name
+                            of the host.
+                          type: string
+                        subnetId:
+                          description: The ID of the subnet, to which the host belongs.
+                            The subnet must be a part of the network to which the
+                            cluster belongs.
+                          type: string
+                        zone:
+                          description: The availability zone where the SQLServer host
+                            will be created.
+                          type: string
+                      type: object
+                    type: array
+                  hostGroupIds:
+                    description: A list of IDs of the host groups hosting VMs of the
+                      cluster.
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: set
+                  id:
+                    type: string
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: A set of key/value label pairs to assign to the SQLServer
+                      cluster.
+                    type: object
+                    x-kubernetes-map-type: granular
+                  name:
+                    description: Name of the SQLServer cluster. Provided by the client
+                      when the cluster is created.
+                    type: string
+                  networkId:
+                    description: ID of the network to which the SQLServer cluster
+                      belongs.
+                    type: string
+                  resources:
+                    description: Resources allocated to hosts of the SQLServer cluster.
+                      The structure is documented below.
+                    items:
+                      properties:
+                        diskSize:
+                          description: Volume of the storage available to a SQLServer
+                            host, in gigabytes.
+                          type: number
+                        diskTypeId:
+                          description: Type of the storage of SQLServer hosts.
+                          type: string
+                        resourcePresetId:
+                          type: string
+                      type: object
+                    type: array
+                  securityGroupIds:
+                    description: A set of IDs of security groups assigned to hosts
+                      of the cluster.
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: set
+                  sqlcollation:
+                    description: SQL Collation the cluster will be created with. This
+                      attribute cannot be changed once the cluster is created.
+                    type: string
+                  sqlserverConfig:
+                    additionalProperties:
+                      type: string
+                    description: SQLServer cluster config. Detailed info in the "SQLServer
+                      config" section (documented below).
+                    type: object
+                    x-kubernetes-map-type: granular
+                  status:
+                    description: Status of the cluster.
+                    type: string
+                  user:
+                    description: A user of the SQLServer cluster. The structure is
+                      documented below.
+                    items:
+                      properties:
+                        name:
+                          description: The name of the user.
+                          type: string
+                        permission:
+                          description: Set of permissions granted to the user. The
+                            structure is documented below.
+                          items:
+                            properties:
+                              databaseName:
+                                description: The name of the database that the permission
+                                  grants access to.
+                                type: string
+                              roles:
+                                description: 'List of the user''s roles in the database.
+                                  Allowed roles: OWNER, SECURITYADMIN, ACCESSADMIN,
+                                  BACKUPOPERATOR, DDLADMIN, DATAWRITER, DATAREADER,
+                                  DENYDATAWRITER, DENYDATAREADER.'
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: set
+                            type: object
+                          type: array
+                      type: object
+                    type: array
+                  version:
+                    description: Version of the SQLServer cluster. (2016sp2std, 2016sp2ent)
+                    type: string
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/message.yandex-cloud.upjet.crossplane.io_queues.yaml b/package/crds/message.yandex-cloud.upjet.crossplane.io_queues.yaml new file mode 100644 index 0000000..e01ac8f --- /dev/null +++ b/package/crds/message.yandex-cloud.upjet.crossplane.io_queues.yaml @@ -0,0 +1,707 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: queues.message.yandex-cloud.upjet.crossplane.io +spec: + group: message.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Queue + listKind: QueueList + plural: queues + singular: queue + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Queue is the Schema for the Queues API. Allows management of + a Yandex.Cloud Message Queue. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QueueSpec defines the desired state of Queue + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessKey: + description: The access key to use when applying changes. If omitted, + ymq_access_key specified in provider config is used. For more + information see documentation. + type: string + accessKeyRef: + description: Reference to a ServiceAccountStaticAccessKey in iam + to populate accessKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessKeySelector: + description: Selector for a ServiceAccountStaticAccessKey in iam + to populate accessKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + contentBasedDeduplication: + description: Enables content-based deduplication. Can be used + only if queue is FIFO. 
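+                    # Sketch (hypothetical values): this flag only takes effect on a
+                    # FIFO queue, so a spec enabling it would pair it with fifoQueue
+                    # and a .fifo-suffixed name, e.g.:
+                    #
+                    #   apiVersion: message.yandex-cloud.upjet.crossplane.io/v1alpha1
+                    #   kind: Queue
+                    #   metadata:
+                    #     name: example-queue
+                    #   spec:
+                    #     forProvider:
+                    #       name: example-queue.fifo
+                    #       fifoQueue: true
+                    #       contentBasedDeduplication: true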
+ type: boolean + delaySeconds: + description: 'Number of seconds to delay the message from being + available for processing. Valid values: from 0 to 900 seconds + (15 minutes). Default: 0.' + type: number + fifoQueue: + description: Is this queue FIFO. If this parameter is not used, + a standard queue is created. You cannot change the parameter + value for a created queue. + type: boolean + maxMessageSize: + description: 'Maximum message size in bytes. Valid values: from + 1024 bytes (1 KB) to 262144 bytes (256 KB). Default: 262144 + (256 KB). For more information see documentation.' + type: number + messageRetentionSeconds: + description: 'The length of time in seconds to retain a message. + Valid values: from 60 seconds (1 minute) to 1209600 seconds + (14 days). Default: 345600 (4 days). For more information see + documentation.' + type: number + name: + description: Queue name. The maximum length is 80 characters. + You can use numbers, letters, underscores, and hyphens in the + name. The name of a FIFO queue must end with the .fifo suffix. + If not specified, random name will be generated. Conflicts with + name_prefix. For more information see documentation. + type: string + namePrefix: + description: Generates random name with the specified prefix. + Conflicts with name. + type: string + receiveWaitTimeSeconds: + description: 'Wait time for the ReceiveMessage method (for long + polling), in seconds. Valid values: from 0 to 20 seconds. Default: + 0. For more information about long polling see documentation.' + type: number + redrivePolicy: + description: 'Message redrive policy in Dead Letter Queue. The + source queue and DLQ must be the same type: for FIFO queues, + the DLQ must also be a FIFO queue. For more information about + redrive policy see documentation. Also you can use example in + this page.' + type: string + regionId: + description: ID of the region where the message queue is located + at. The default is 'ru-central1'. + type: string + secretKeySecretRef: + description: The secret key to use when applying changes. If omitted, + ymq_secret_key specified in provider config is used. For more + information see documentation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + visibilityTimeoutSeconds: + description: 'Visibility timeout for messages in a queue, specified + in seconds. Valid values: from 0 to 43200 seconds (12 hours). + Default: 30.' + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessKey: + description: The access key to use when applying changes. If omitted, + ymq_access_key specified in provider config is used. For more + information see documentation. 
+ type: string + accessKeyRef: + description: Reference to a ServiceAccountStaticAccessKey in iam + to populate accessKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessKeySelector: + description: Selector for a ServiceAccountStaticAccessKey in iam + to populate accessKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + contentBasedDeduplication: + description: Enables content-based deduplication. Can be used + only if queue is FIFO. + type: boolean + delaySeconds: + description: 'Number of seconds to delay the message from being + available for processing. Valid values: from 0 to 900 seconds + (15 minutes). Default: 0.' + type: number + fifoQueue: + description: Is this queue FIFO. If this parameter is not used, + a standard queue is created. You cannot change the parameter + value for a created queue. + type: boolean + maxMessageSize: + description: 'Maximum message size in bytes. Valid values: from + 1024 bytes (1 KB) to 262144 bytes (256 KB). Default: 262144 + (256 KB). For more information see documentation.' + type: number + messageRetentionSeconds: + description: 'The length of time in seconds to retain a message. + Valid values: from 60 seconds (1 minute) to 1209600 seconds + (14 days). Default: 345600 (4 days). For more information see + documentation.' + type: number + name: + description: Queue name. The maximum length is 80 characters. + You can use numbers, letters, underscores, and hyphens in the + name. The name of a FIFO queue must end with the .fifo suffix. + If not specified, random name will be generated. Conflicts with + name_prefix. For more information see documentation. 
+ type: string + namePrefix: + description: Generates random name with the specified prefix. + Conflicts with name. + type: string + receiveWaitTimeSeconds: + description: 'Wait time for the ReceiveMessage method (for long + polling), in seconds. Valid values: from 0 to 20 seconds. Default: + 0. For more information about long polling see documentation.' + type: number + redrivePolicy: + description: 'Message redrive policy in Dead Letter Queue. The + source queue and DLQ must be the same type: for FIFO queues, + the DLQ must also be a FIFO queue. For more information about + redrive policy see documentation. Also you can use example in + this page.' + type: string + regionId: + description: ID of the region where the message queue is located + at. The default is 'ru-central1'. + type: string + secretKeySecretRef: + description: The secret key to use when applying changes. If omitted, + ymq_secret_key specified in provider config is used. For more + information see documentation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + visibilityTimeoutSeconds: + description: 'Visibility timeout for messages in a queue, specified + in seconds. Valid values: from 0 to 43200 seconds (12 hours). + Default: 30.' + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: QueueStatus defines the observed state of Queue. + properties: + atProvider: + properties: + accessKey: + description: The access key to use when applying changes. 
If omitted, + ymq_access_key specified in provider config is used. For more + information see documentation. + type: string + arn: + description: ARN of the Yandex Message Queue. It is used for setting + up a redrive policy. See documentation. + type: string + contentBasedDeduplication: + description: Enables content-based deduplication. Can be used + only if queue is FIFO. + type: boolean + delaySeconds: + description: 'Number of seconds to delay the message from being + available for processing. Valid values: from 0 to 900 seconds + (15 minutes). Default: 0.' + type: number + fifoQueue: + description: Is this queue FIFO. If this parameter is not used, + a standard queue is created. You cannot change the parameter + value for a created queue. + type: boolean + id: + description: URL of the Yandex Message Queue. + type: string + maxMessageSize: + description: 'Maximum message size in bytes. Valid values: from + 1024 bytes (1 KB) to 262144 bytes (256 KB). Default: 262144 + (256 KB). For more information see documentation.' + type: number + messageRetentionSeconds: + description: 'The length of time in seconds to retain a message. + Valid values: from 60 seconds (1 minute) to 1209600 seconds + (14 days). Default: 345600 (4 days). For more information see + documentation.' + type: number + name: + description: Queue name. The maximum length is 80 characters. + You can use numbers, letters, underscores, and hyphens in the + name. The name of a FIFO queue must end with the .fifo suffix. + If not specified, random name will be generated. Conflicts with + name_prefix. For more information see documentation. + type: string + namePrefix: + description: Generates random name with the specified prefix. + Conflicts with name. + type: string + receiveWaitTimeSeconds: + description: 'Wait time for the ReceiveMessage method (for long + polling), in seconds. Valid values: from 0 to 20 seconds. Default: + 0. For more information about long polling see documentation.' + type: number + redrivePolicy: + description: 'Message redrive policy in Dead Letter Queue. The + source queue and DLQ must be the same type: for FIFO queues, + the DLQ must also be a FIFO queue. For more information about + redrive policy see documentation. Also you can use example in + this page.' + type: string + regionId: + description: ID of the region where the message queue is located + at. The default is 'ru-central1'. + type: string + visibilityTimeoutSeconds: + description: 'Visibility timeout for messages in a queue, specified + in seconds. Valid values: from 0 to 43200 seconds (12 hours). + Default: 30.' + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/monitoring.yandex-cloud.upjet.crossplane.io_dashboards.yaml b/package/crds/monitoring.yandex-cloud.upjet.crossplane.io_dashboards.yaml new file mode 100644 index 0000000..3be3213 --- /dev/null +++ b/package/crds/monitoring.yandex-cloud.upjet.crossplane.io_dashboards.yaml @@ -0,0 +1,2379 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: dashboards.monitoring.yandex-cloud.upjet.crossplane.io +spec: + group: monitoring.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Dashboard + listKind: DashboardList + plural: dashboards + singular: dashboard + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Dashboard is the Schema for the Dashboards API. Allows management + of a Yandex.Cloud Monitoring Dashboard. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DashboardSpec defines the desired state of Dashboard + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
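For orientation before the Dashboard schema continues: the Queue CRD closes just above, and a minimal manifest exercising its spec might look like the sketch below. It is illustrative only — the apiVersion group is assumed (the Queue CRD's file header falls outside this excerpt), every name is a placeholder, and the redrivePolicy string follows the SQS-compatible JSON shape the field description alludes to, which should be checked against the provider documentation.

apiVersion: message.yandex-cloud.upjet.crossplane.io/v1alpha1  # group assumed, not visible in this excerpt
kind: Queue
metadata:
  name: example-queue
spec:
  forProvider:
    name: example-queue.fifo          # FIFO queue names must end with .fifo
    fifoQueue: true                   # cannot be changed after the queue is created
    contentBasedDeduplication: true   # valid only for FIFO queues
    messageRetentionSeconds: 345600   # schema default: 4 days
    visibilityTimeoutSeconds: 30      # schema default
    # redrivePolicy is a JSON string; the exact shape is an assumption based on
    # the SQS-compatible API. The DLQ must also be FIFO for a FIFO source queue.
    redrivePolicy: '{"deadLetterTargetArn": "<dlq-arn>", "maxReceiveCount": 3}'
    accessKeyRef:
      name: example-sa-static-key     # a ServiceAccountStaticAccessKey in iam
    secretKeySecretRef:               # key, name, and namespace are all required
      name: example-sa-static-key-secret
      key: secret_key                 # assumed key name inside the Secret
      namespace: crossplane-system
  providerConfigRef:
    name: default

If accessKey and the secret key are omitted entirely, the schema notes that the ymq_access_key/ymq_secret_key values from the provider config are used instead, so the two reference fields above are optional in that setup.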
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: |- + Dashboard description. + Dashboard description + type: string + folderId: + description: |- + Folder that the resource belongs to. If value is omitted, the default provider folder is used. + Folder ID + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: |- + A set of key/value label pairs to assign to the Dashboard. + Dashboard labels + type: object + x-kubernetes-map-type: granular + name: + description: |- + Name of the Dashboard. + Dashboard name, used as local identifier in folder_id + type: string + parametrization: + description: |- + Dashboard parametrization + Dashboard parametrization + items: + properties: + parameters: + description: |- + parameters list. + Dashboard parameter + items: + properties: + custom: + description: |- + Custom values parameter. Oneof: label_values, custom, text. + Custom parameter + items: + properties: + defaultValues: + description: |- + Default value. 
+ Default value + items: + type: string + type: array + multiselectable: + description: |- + Specifies the multiselectable values of parameter. + Specifies the multiselectable values of parameter + type: boolean + values: + description: |- + Parameter values. + Parameter values + items: + type: string + type: array + type: object + type: array + description: + description: |- + Parameter description. + Parameter description + type: string + hidden: + description: |- + UI-visibility. + UI-visibility + type: boolean + id: + description: |- + Parameter identifier + Parameter identifier + type: string + labelValues: + description: |- + Label values parameter. Oneof: label_values, custom, text. + Label values parameter + items: + properties: + defaultValues: + description: |- + Default value. + Default value + items: + type: string + type: array + folderId: + description: |- + Labels folder ID. + Folder ID + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager + to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager + to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labelKey: + description: |- + Label key to list label values. + Required. Label key to list label values + type: string + multiselectable: + description: |- + Specifies the multiselectable values of parameter. + Specifies the multiselectable values of parameter + type: boolean + selectors: + description: |- + dashboard predefined parameters selector. + Required. 
Selectors to select metric label values + type: string + type: object + type: array + text: + description: |- + Text parameter. Oneof: label_values, custom, text. + Text parameter + items: + properties: + defaultValue: + description: |- + Default value. + Default value + type: string + type: object + type: array + title: + description: |- + UI-visible title of the parameter. + UI-visible title of the parameter + type: string + type: object + type: array + selectors: + description: |- + dashboard predefined parameters selector. + Predefined selectors + type: string + type: object + type: array + title: + description: |- + Dashboard title. + Dashboard title + type: string + widgets: + description: |- + Widgets + Widgets + items: + properties: + chart: + description: |- + Chart widget settings. Oneof: text, title or chart. + Chart widget + items: + properties: + chartId: + description: |- + Chart ID. + Chart ID + type: string + description: + description: |- + Parameter description. + Chart description in dashboard (not enabled in UI) + type: string + displayLegend: + description: |- + Enable legend under chart. + Enable legend under chart + type: boolean + freeze: + description: |- + Fixed time interval for chart. Values: + Fixed time interval for chart + type: string + nameHidingSettings: + description: |- + Names settings. + Name hiding settings + items: + properties: + names: + description: Series name. + items: + type: string + type: array + positive: + description: |- + True if we want to show concrete series names only, false if we want to hide concrete series names. + True if we want to show concrete series names only, false if we want to hide concrete series names + type: boolean + type: object + type: array + queries: + description: |- + Queries settings. + Queries + items: + properties: + downsampling: + description: |- + Downsamplang settings. + Downsampling settings + items: + properties: + disabled: + description: |- + Disable downsampling. + Disable downsampling + type: boolean + gapFilling: + description: |- + Parameters for filling gaps in data. + Parameters for filling gaps in data + type: string + gridAggregation: + description: |- + Function that is used for downsampling. + Function that is used for downsampling + type: string + gridInterval: + description: |- + Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + type: number + maxPoints: + description: |- + Maximum number of points to be returned. + Maximum number of points to be returned + type: number + type: object + type: array + target: + description: |- + Query targets. + Downsampling settings + items: + properties: + hidden: + description: |- + Checks that target is visible or invisible. + Checks that target is visible or invisible + type: boolean + query: + description: |- + Query. + Required. Query + type: string + textMode: + description: |- + Text mode enabled. + Text mode + type: boolean + type: object + type: array + type: object + type: array + seriesOverrides: + description: Time series settings. + items: + properties: + name: + description: |- + Series name or empty. + Series name + type: string + settings: + description: |- + Override settings. + Override settings + items: + properties: + color: + description: |- + Series color or empty. 
+ Series color or empty + type: string + growDown: + description: |- + Stack grow down. + Stack grow down + type: boolean + name: + description: |- + Series name or empty. + Series name or empty + type: string + stackName: + description: |- + Stack name or empty. + Stack name or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + yaxisPosition: + description: |- + Yaxis position. + Yaxis position + type: string + type: object + type: array + targetIndex: + description: |- + Series index. Oneof: name or target_index. + Target index + type: string + type: object + type: array + title: + description: |- + Title or empty. + Chart widget title + type: string + visualizationSettings: + description: |- + Visualization settings. + Visualization settings + items: + properties: + aggregation: + description: |- + Aggregation. Values: + Aggregation + type: string + colorSchemeSettings: + description: |- + Color settings. + Color scheme settings + items: + properties: + automatic: + description: |- + Automatic color scheme. Oneof: automatic, standard or gradient. + Automatic color scheme + items: + type: object + type: array + gradient: + description: |- + Gradient color scheme. Oneof: automatic, standard or gradient. + Gradient color scheme + items: + properties: + greenValue: + description: |- + Gradient green value. + Gradient green value + type: string + redValue: + description: |- + Gradient red value. + Gradient red value + type: string + violetValue: + description: |- + Gradient violet value. + Gradient violet_value + type: string + yellowValue: + description: |- + Gradient yellow value. + Gradient yellow value + type: string + type: object + type: array + standard: + description: |- + Standard color scheme. Oneof: automatic, standard or gradient. + Standard color scheme + items: + type: object + type: array + type: object + type: array + heatmapSettings: + description: |- + Heatmap settings. + Heatmap settings + items: + properties: + greenValue: + description: |- + Gradient green value. + Heatmap green value + type: string + redValue: + description: |- + Gradient red value. + Heatmap red value + type: string + violetValue: + description: |- + Gradient violet value. + Heatmap violet_value + type: string + yellowValue: + description: |- + Gradient yellow value. + Heatmap yellow value + type: string + type: object + type: array + interpolate: + description: |- + Interpolate values. Values: + Interpolate + type: string + normalize: + description: |- + Normalize values. + Normalize + type: boolean + showLabels: + description: |- + Show chart labels. + Show chart labels + type: boolean + title: + description: |- + Title or empty. + Inside chart title + type: string + type: + description: |- + Type. Values: + Visualization type + type: string + yaxisSettings: + description: |- + Y axis settings. + Y axis settings + items: + properties: + left: + description: |- + Left yaxis config. + Left Y axis settings + items: + properties: + max: + description: |- + Max value in extended number format or empty. + Max value in extended number format or empty + type: string + min: + description: |- + Min value in extended number format or empty. + Min value in extended number format or empty + type: string + precision: + description: |- + Tick value precision (null as default, 0-7 in other cases). + Tick value precision (null as default, 0-7 in other cases) + type: number + title: + description: |- + Title or empty. + Title or empty + type: string + type: + description: |- + Type. 
Values: + Type + type: string + unitFormat: + description: |- + Unit format. Values: + Unit format + type: string + type: object + type: array + right: + description: |- + Right yaxis config. + Right Y axis settings + items: + properties: + max: + description: |- + Max value in extended number format or empty. + Max value in extended number format or empty + type: string + min: + description: |- + Min value in extended number format or empty. + Min value in extended number format or empty + type: string + precision: + description: |- + Tick value precision (null as default, 0-7 in other cases). + Tick value precision (null as default, 0-7 in other cases) + type: number + title: + description: |- + Title or empty. + Title or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + unitFormat: + description: |- + Unit format. Values: + Unit format + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + position: + description: |- + Widget position. + Required. Widget layout position + items: + properties: + h: + description: |- + Height. + Required. Height + type: number + w: + description: |- + Width. + Required. Weight + type: number + x: + description: |- + X-axis top-left corner coordinate. + Required. X-axis top-left corner coordinate + type: number + "y": + description: |- + Y-axis top-left corner coordinate. + Required. Y-axis top-left corner coordinate + type: number + type: object + type: array + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Text widget + items: + properties: + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Text + type: string + type: object + type: array + title: + description: |- + Title widget settings. Oneof: text, title or chart. + Title widget + items: + properties: + size: + description: |- + Title size. Values: + Title size + type: string + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Title text + type: string + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: |- + Dashboard description. + Dashboard description + type: string + folderId: + description: |- + Folder that the resource belongs to. If value is omitted, the default provider folder is used. + Folder ID + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: |- + A set of key/value label pairs to assign to the Dashboard. + Dashboard labels + type: object + x-kubernetes-map-type: granular + name: + description: |- + Name of the Dashboard. + Dashboard name, used as local identifier in folder_id + type: string + parametrization: + description: |- + Dashboard parametrization + Dashboard parametrization + items: + properties: + parameters: + description: |- + parameters list. + Dashboard parameter + items: + properties: + custom: + description: |- + Custom values parameter. Oneof: label_values, custom, text. + Custom parameter + items: + properties: + defaultValues: + description: |- + Default value. + Default value + items: + type: string + type: array + multiselectable: + description: |- + Specifies the multiselectable values of parameter. + Specifies the multiselectable values of parameter + type: boolean + values: + description: |- + Parameter values. + Parameter values + items: + type: string + type: array + type: object + type: array + description: + description: |- + Parameter description. + Parameter description + type: string + hidden: + description: |- + UI-visibility. + UI-visibility + type: boolean + id: + description: |- + Parameter identifier + Parameter identifier + type: string + labelValues: + description: |- + Label values parameter. Oneof: label_values, custom, text. + Label values parameter + items: + properties: + defaultValues: + description: |- + Default value. + Default value + items: + type: string + type: array + folderId: + description: |- + Labels folder ID. 
+ Folder ID + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager + to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager + to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labelKey: + description: |- + Label key to list label values. + Required. Label key to list label values + type: string + multiselectable: + description: |- + Specifies the multiselectable values of parameter. + Specifies the multiselectable values of parameter + type: boolean + selectors: + description: |- + dashboard predefined parameters selector. + Required. Selectors to select metric label values + type: string + type: object + type: array + text: + description: |- + Text parameter. Oneof: label_values, custom, text. + Text parameter + items: + properties: + defaultValue: + description: |- + Default value. + Default value + type: string + type: object + type: array + title: + description: |- + UI-visible title of the parameter. + UI-visible title of the parameter + type: string + type: object + type: array + selectors: + description: |- + dashboard predefined parameters selector. + Predefined selectors + type: string + type: object + type: array + title: + description: |- + Dashboard title. + Dashboard title + type: string + widgets: + description: |- + Widgets + Widgets + items: + properties: + chart: + description: |- + Chart widget settings. Oneof: text, title or chart. + Chart widget + items: + properties: + chartId: + description: |- + Chart ID. 
+ Chart ID + type: string + description: + description: |- + Parameter description. + Chart description in dashboard (not enabled in UI) + type: string + displayLegend: + description: |- + Enable legend under chart. + Enable legend under chart + type: boolean + freeze: + description: |- + Fixed time interval for chart. Values: + Fixed time interval for chart + type: string + nameHidingSettings: + description: |- + Names settings. + Name hiding settings + items: + properties: + names: + description: Series name. + items: + type: string + type: array + positive: + description: |- + True if we want to show concrete series names only, false if we want to hide concrete series names. + True if we want to show concrete series names only, false if we want to hide concrete series names + type: boolean + type: object + type: array + queries: + description: |- + Queries settings. + Queries + items: + properties: + downsampling: + description: |- + Downsamplang settings. + Downsampling settings + items: + properties: + disabled: + description: |- + Disable downsampling. + Disable downsampling + type: boolean + gapFilling: + description: |- + Parameters for filling gaps in data. + Parameters for filling gaps in data + type: string + gridAggregation: + description: |- + Function that is used for downsampling. + Function that is used for downsampling + type: string + gridInterval: + description: |- + Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + type: number + maxPoints: + description: |- + Maximum number of points to be returned. + Maximum number of points to be returned + type: number + type: object + type: array + target: + description: |- + Query targets. + Downsampling settings + items: + properties: + hidden: + description: |- + Checks that target is visible or invisible. + Checks that target is visible or invisible + type: boolean + query: + description: |- + Query. + Required. Query + type: string + textMode: + description: |- + Text mode enabled. + Text mode + type: boolean + type: object + type: array + type: object + type: array + seriesOverrides: + description: Time series settings. + items: + properties: + name: + description: |- + Series name or empty. + Series name + type: string + settings: + description: |- + Override settings. + Override settings + items: + properties: + color: + description: |- + Series color or empty. + Series color or empty + type: string + growDown: + description: |- + Stack grow down. + Stack grow down + type: boolean + name: + description: |- + Series name or empty. + Series name or empty + type: string + stackName: + description: |- + Stack name or empty. + Stack name or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + yaxisPosition: + description: |- + Yaxis position. + Yaxis position + type: string + type: object + type: array + targetIndex: + description: |- + Series index. Oneof: name or target_index. + Target index + type: string + type: object + type: array + title: + description: |- + Title or empty. + Chart widget title + type: string + visualizationSettings: + description: |- + Visualization settings. + Visualization settings + items: + properties: + aggregation: + description: |- + Aggregation. Values: + Aggregation + type: string + colorSchemeSettings: + description: |- + Color settings. 
+ Color scheme settings + items: + properties: + automatic: + description: |- + Automatic color scheme. Oneof: automatic, standard or gradient. + Automatic color scheme + items: + type: object + type: array + gradient: + description: |- + Gradient color scheme. Oneof: automatic, standard or gradient. + Gradient color scheme + items: + properties: + greenValue: + description: |- + Gradient green value. + Gradient green value + type: string + redValue: + description: |- + Gradient red value. + Gradient red value + type: string + violetValue: + description: |- + Gradient violet value. + Gradient violet_value + type: string + yellowValue: + description: |- + Gradient yellow value. + Gradient yellow value + type: string + type: object + type: array + standard: + description: |- + Standard color scheme. Oneof: automatic, standard or gradient. + Standard color scheme + items: + type: object + type: array + type: object + type: array + heatmapSettings: + description: |- + Heatmap settings. + Heatmap settings + items: + properties: + greenValue: + description: |- + Gradient green value. + Heatmap green value + type: string + redValue: + description: |- + Gradient red value. + Heatmap red value + type: string + violetValue: + description: |- + Gradient violet value. + Heatmap violet_value + type: string + yellowValue: + description: |- + Gradient yellow value. + Heatmap yellow value + type: string + type: object + type: array + interpolate: + description: |- + Interpolate values. Values: + Interpolate + type: string + normalize: + description: |- + Normalize values. + Normalize + type: boolean + showLabels: + description: |- + Show chart labels. + Show chart labels + type: boolean + title: + description: |- + Title or empty. + Inside chart title + type: string + type: + description: |- + Type. Values: + Visualization type + type: string + yaxisSettings: + description: |- + Y axis settings. + Y axis settings + items: + properties: + left: + description: |- + Left yaxis config. + Left Y axis settings + items: + properties: + max: + description: |- + Max value in extended number format or empty. + Max value in extended number format or empty + type: string + min: + description: |- + Min value in extended number format or empty. + Min value in extended number format or empty + type: string + precision: + description: |- + Tick value precision (null as default, 0-7 in other cases). + Tick value precision (null as default, 0-7 in other cases) + type: number + title: + description: |- + Title or empty. + Title or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + unitFormat: + description: |- + Unit format. Values: + Unit format + type: string + type: object + type: array + right: + description: |- + Right yaxis config. + Right Y axis settings + items: + properties: + max: + description: |- + Max value in extended number format or empty. + Max value in extended number format or empty + type: string + min: + description: |- + Min value in extended number format or empty. + Min value in extended number format or empty + type: string + precision: + description: |- + Tick value precision (null as default, 0-7 in other cases). + Tick value precision (null as default, 0-7 in other cases) + type: number + title: + description: |- + Title or empty. + Title or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + unitFormat: + description: |- + Unit format. 
Values: + Unit format + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + position: + description: |- + Widget position. + Required. Widget layout position + items: + properties: + h: + description: |- + Height. + Required. Height + type: number + w: + description: |- + Width. + Required. Weight + type: number + x: + description: |- + X-axis top-left corner coordinate. + Required. X-axis top-left corner coordinate + type: number + "y": + description: |- + Y-axis top-left corner coordinate. + Required. Y-axis top-left corner coordinate + type: number + type: object + type: array + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Text widget + items: + properties: + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Text + type: string + type: object + type: array + title: + description: |- + Title widget settings. Oneof: text, title or chart. + Title widget + items: + properties: + size: + description: |- + Title size. Values: + Title size + type: string + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Title text + type: string + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: DashboardStatus defines the observed state of Dashboard. + properties: + atProvider: + properties: + dashboardId: + description: Dashboard ID + type: string + description: + description: |- + Dashboard description. + Dashboard description + type: string + folderId: + description: |- + Folder that the resource belongs to. If value is omitted, the default provider folder is used. + Folder ID + type: string + id: + description: Parameter identifier + type: string + labels: + additionalProperties: + type: string + description: |- + A set of key/value label pairs to assign to the Dashboard. + Dashboard labels + type: object + x-kubernetes-map-type: granular + name: + description: |- + Name of the Dashboard. + Dashboard name, used as local identifier in folder_id + type: string + parametrization: + description: |- + Dashboard parametrization + Dashboard parametrization + items: + properties: + parameters: + description: |- + parameters list. + Dashboard parameter + items: + properties: + custom: + description: |- + Custom values parameter. Oneof: label_values, custom, text. + Custom parameter + items: + properties: + defaultValues: + description: |- + Default value. + Default value + items: + type: string + type: array + multiselectable: + description: |- + Specifies the multiselectable values of parameter. + Specifies the multiselectable values of parameter + type: boolean + values: + description: |- + Parameter values. + Parameter values + items: + type: string + type: array + type: object + type: array + description: + description: |- + Parameter description. + Parameter description + type: string + hidden: + description: |- + UI-visibility. + UI-visibility + type: boolean + id: + description: |- + Parameter identifier + Parameter identifier + type: string + labelValues: + description: |- + Label values parameter. Oneof: label_values, custom, text. + Label values parameter + items: + properties: + defaultValues: + description: |- + Default value. + Default value + items: + type: string + type: array + folderId: + description: |- + Labels folder ID. + Folder ID + type: string + labelKey: + description: |- + Label key to list label values. + Required. Label key to list label values + type: string + multiselectable: + description: |- + Specifies the multiselectable values of parameter. + Specifies the multiselectable values of parameter + type: boolean + selectors: + description: |- + dashboard predefined parameters selector. + Required. Selectors to select metric label values + type: string + type: object + type: array + text: + description: |- + Text parameter. Oneof: label_values, custom, text. + Text parameter + items: + properties: + defaultValue: + description: |- + Default value. + Default value + type: string + type: object + type: array + title: + description: |- + UI-visible title of the parameter. + UI-visible title of the parameter + type: string + type: object + type: array + selectors: + description: |- + dashboard predefined parameters selector. 
+ Predefined selectors + type: string + type: object + type: array + title: + description: |- + Dashboard title. + Dashboard title + type: string + widgets: + description: |- + Widgets + Widgets + items: + properties: + chart: + description: |- + Chart widget settings. Oneof: text, title or chart. + Chart widget + items: + properties: + chartId: + description: |- + Chart ID. + Chart ID + type: string + description: + description: |- + Parameter description. + Chart description in dashboard (not enabled in UI) + type: string + displayLegend: + description: |- + Enable legend under chart. + Enable legend under chart + type: boolean + freeze: + description: |- + Fixed time interval for chart. Values: + Fixed time interval for chart + type: string + nameHidingSettings: + description: |- + Names settings. + Name hiding settings + items: + properties: + names: + description: Series name. + items: + type: string + type: array + positive: + description: |- + True if we want to show concrete series names only, false if we want to hide concrete series names. + True if we want to show concrete series names only, false if we want to hide concrete series names + type: boolean + type: object + type: array + queries: + description: |- + Queries settings. + Queries + items: + properties: + downsampling: + description: |- + Downsamplang settings. + Downsampling settings + items: + properties: + disabled: + description: |- + Disable downsampling. + Disable downsampling + type: boolean + gapFilling: + description: |- + Parameters for filling gaps in data. + Parameters for filling gaps in data + type: string + gridAggregation: + description: |- + Function that is used for downsampling. + Function that is used for downsampling + type: string + gridInterval: + description: |- + Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + Time interval (grid) for downsampling in milliseconds. Points in the specified range are aggregated into one time point + type: number + maxPoints: + description: |- + Maximum number of points to be returned. + Maximum number of points to be returned + type: number + type: object + type: array + target: + description: |- + Query targets. + Downsampling settings + items: + properties: + hidden: + description: |- + Checks that target is visible or invisible. + Checks that target is visible or invisible + type: boolean + query: + description: |- + Query. + Required. Query + type: string + textMode: + description: |- + Text mode enabled. + Text mode + type: boolean + type: object + type: array + type: object + type: array + seriesOverrides: + description: Time series settings. + items: + properties: + name: + description: |- + Series name or empty. + Series name + type: string + settings: + description: |- + Override settings. + Override settings + items: + properties: + color: + description: |- + Series color or empty. + Series color or empty + type: string + growDown: + description: |- + Stack grow down. + Stack grow down + type: boolean + name: + description: |- + Series name or empty. + Series name or empty + type: string + stackName: + description: |- + Stack name or empty. + Stack name or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + yaxisPosition: + description: |- + Yaxis position. + Yaxis position + type: string + type: object + type: array + targetIndex: + description: |- + Series index. Oneof: name or target_index. 
+ Target index + type: string + type: object + type: array + title: + description: |- + Title or empty. + Chart widget title + type: string + visualizationSettings: + description: |- + Visualization settings. + Visualization settings + items: + properties: + aggregation: + description: |- + Aggregation. Values: + Aggregation + type: string + colorSchemeSettings: + description: |- + Color settings. + Color scheme settings + items: + properties: + automatic: + description: |- + Automatic color scheme. Oneof: automatic, standard or gradient. + Automatic color scheme + items: + type: object + type: array + gradient: + description: |- + Gradient color scheme. Oneof: automatic, standard or gradient. + Gradient color scheme + items: + properties: + greenValue: + description: |- + Gradient green value. + Gradient green value + type: string + redValue: + description: |- + Gradient red value. + Gradient red value + type: string + violetValue: + description: |- + Gradient violet value. + Gradient violet_value + type: string + yellowValue: + description: |- + Gradient yellow value. + Gradient yellow value + type: string + type: object + type: array + standard: + description: |- + Standard color scheme. Oneof: automatic, standard or gradient. + Standard color scheme + items: + type: object + type: array + type: object + type: array + heatmapSettings: + description: |- + Heatmap settings. + Heatmap settings + items: + properties: + greenValue: + description: |- + Gradient green value. + Heatmap green value + type: string + redValue: + description: |- + Gradient red value. + Heatmap red value + type: string + violetValue: + description: |- + Gradient violet value. + Heatmap violet_value + type: string + yellowValue: + description: |- + Gradient yellow value. + Heatmap yellow value + type: string + type: object + type: array + interpolate: + description: |- + Interpolate values. Values: + Interpolate + type: string + normalize: + description: |- + Normalize values. + Normalize + type: boolean + showLabels: + description: |- + Show chart labels. + Show chart labels + type: boolean + title: + description: |- + Title or empty. + Inside chart title + type: string + type: + description: |- + Type. Values: + Visualization type + type: string + yaxisSettings: + description: |- + Y axis settings. + Y axis settings + items: + properties: + left: + description: |- + Left yaxis config. + Left Y axis settings + items: + properties: + max: + description: |- + Max value in extended number format or empty. + Max value in extended number format or empty + type: string + min: + description: |- + Min value in extended number format or empty. + Min value in extended number format or empty + type: string + precision: + description: |- + Tick value precision (null as default, 0-7 in other cases). + Tick value precision (null as default, 0-7 in other cases) + type: number + title: + description: |- + Title or empty. + Title or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + unitFormat: + description: |- + Unit format. Values: + Unit format + type: string + type: object + type: array + right: + description: |- + Right yaxis config. + Right Y axis settings + items: + properties: + max: + description: |- + Max value in extended number format or empty. + Max value in extended number format or empty + type: string + min: + description: |- + Min value in extended number format or empty. 
+ Min value in extended number format or empty + type: string + precision: + description: |- + Tick value precision (null as default, 0-7 in other cases). + Tick value precision (null as default, 0-7 in other cases) + type: number + title: + description: |- + Title or empty. + Title or empty + type: string + type: + description: |- + Type. Values: + Type + type: string + unitFormat: + description: |- + Unit format. Values: + Unit format + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + position: + description: |- + Widget position. + Required. Widget layout position + items: + properties: + h: + description: |- + Height. + Required. Height + type: number + w: + description: |- + Width. + Required. Width + type: number + x: + description: |- + X-axis top-left corner coordinate. + Required. X-axis top-left corner coordinate + type: number + "y": + description: |- + Y-axis top-left corner coordinate. + Required. Y-axis top-left corner coordinate + type: number + type: object + type: array + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Text widget + items: + properties: + text: + description: |- + Text widget settings. Oneof: text, title or chart. + Text + type: string + type: object + type: array + title: + description: |- + Title widget settings. Oneof: text, title or chart. + Title widget + items: + properties: + size: + description: |- + Title size. Values: + Title size + type: string + text: + description: |- + Title widget settings. Oneof: text, title or chart. + Title text + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groupiammembers.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groupiammembers.yaml new file mode 100644 index 0000000..d65f3fb --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groupiammembers.yaml @@ -0,0 +1,523 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: groupiammembers.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: GroupIAMMember + listKind: GroupIAMMemberList + plural: groupiammembers + singular: groupiammember + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: GroupIAMMember is the Schema for the GroupIAMMembers API. Allows + management of a single member for a single IAM binding on a Yandex.Cloud + Organization Manager Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupIAMMemberSpec defines the desired state of GroupIAMMember + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + groupId: + description: ID of the organization to attach a policy to. + type: string + groupIdRef: + description: Reference to a Group to populate groupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + groupIdSelector: + description: Selector for a Group to populate groupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + groupId: + description: ID of the organization to attach a policy to. + type: string + groupIdRef: + description: Reference to a Group to populate groupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + groupIdSelector: + description: Selector for a Group to populate groupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. 
+ properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.member is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.member) + || (has(self.initProvider) && has(self.initProvider.member))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: GroupIAMMemberStatus defines the observed state of GroupIAMMember. + properties: + atProvider: + properties: + groupId: + description: ID of the organization to attach a policy to. + type: string + id: + type: string + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groupmemberships.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groupmemberships.yaml new file mode 100644 index 0000000..8d5cb26 --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groupmemberships.yaml @@ -0,0 +1,509 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: groupmemberships.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: GroupMembership + listKind: GroupMembershipList + plural: groupmemberships + singular: groupmembership + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: GroupMembership is the Schema for the GroupMemberships API. Allows + management of members of Yandex.Cloud Organization Manager Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupMembershipSpec defines the desired state of GroupMembership + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + groupId: + description: The Group to add/remove members to/from. + type: string + groupIdRef: + description: Reference to a Group to populate groupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + groupIdSelector: + description: Selector for a Group to populate groupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: A set of members of the Group. Each member is represented + by an id. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + groupId: + description: The Group to add/remove members to/from. + type: string + groupIdRef: + description: Reference to a Group to populate groupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + groupIdSelector: + description: Selector for a Group to populate groupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: A set of members of the Group. Each member is represented + by an id. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + status: + description: GroupMembershipStatus defines the observed state of GroupMembership. + properties: + atProvider: + properties: + groupId: + description: The Group to add/remove members to/from. + type: string + id: + type: string + members: + description: A set of members of the Group. Each member is represented + by an id. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groups.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groups.yaml new file mode 100644 index 0000000..edc5a59 --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_groups.yaml @@ -0,0 +1,365 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: groups.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Group + listKind: GroupList + plural: groups + singular: group + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Group is the Schema for the Groups API. Allows management of + a single Group within an existing Yandex.Cloud Organization. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupSpec defines the desired state of Group + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the Group. + type: string + name: + description: The name of the Group. + type: string + organizationId: + description: The organization to attach this Group to. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the Group. + type: string + name: + description: The name of the Group. + type: string + organizationId: + description: The organization to attach this Group to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.organizationId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationId) + || (has(self.initProvider) && has(self.initProvider.organizationId))' + status: + description: GroupStatus defines the observed state of Group. + properties: + atProvider: + properties: + createdAt: + description: (Computed) The Group creation timestamp. + type: string + description: + description: The description of the Group. + type: string + id: + type: string + name: + description: The name of the Group.
+ type: string + organizationId: + description: The organization to attach this Group to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_organizationiambindings.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_organizationiambindings.yaml new file mode 100644 index 0000000..a35d6bc --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_organizationiambindings.yaml @@ -0,0 +1,392 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: organizationiambindings.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: OrganizationIAMBinding + listKind: OrganizationIAMBindingList + plural: organizationiambindings + singular: organizationiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: OrganizationIAMBinding is the Schema for the OrganizationIAMBindings + API. Allows management of a single IAM binding for a Yandex Organization + Manager organization. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OrganizationIAMBindingSpec defines the desired state of OrganizationIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + organizationId: + description: ID of the organization to attach the policy to. + type: string + role: + description: The role that should be assigned. Only one yandex_organizationmanager_organization_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + organizationId: + description: ID of the organization to attach the policy to. + type: string + role: + description: The role that should be assigned. Only one yandex_organizationmanager_organization_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.organizationId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationId) + || (has(self.initProvider) && has(self.initProvider.organizationId))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: OrganizationIAMBindingStatus defines the observed state of + OrganizationIAMBinding. + properties: + atProvider: + properties: + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + organizationId: + description: ID of the organization to attach the policy to. + type: string + role: + description: The role that should be assigned. Only one yandex_organizationmanager_organization_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
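
For orientation while reading this generated schema: the x-kubernetes-validations rules above mean that members, organizationId, and role must be present in either forProvider or initProvider whenever managementPolicies includes Create, Update, or '*'. A minimal manifest satisfying them might look like the sketch below; the organization ID, role, and member identity are hypothetical placeholders, not values from this changeset.

apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: OrganizationIAMBinding
metadata:
  name: example-iam-binding
spec:
  forProvider:
    # Hypothetical organization ID, role, and member identity, for illustration only.
    organizationId: bpfexampleorg00000000
    role: viewer
    members:
      - "userAccount:ajeexampleuser0000000"
  providerConfigRef:
    name: default
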
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_organizationiammembers.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_organizationiammembers.yaml new file mode 100644 index 0000000..8c21fca --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_organizationiammembers.yaml @@ -0,0 +1,380 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: organizationiammembers.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: OrganizationIAMMember + listKind: OrganizationIAMMemberList + plural: organizationiammembers + singular: organizationiammember + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: OrganizationIAMMember is the Schema for the OrganizationIAMMembers + API. Allows management of a single member for a single IAM binding on a + Yandex.Cloud Organization Manager organization. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OrganizationIAMMemberSpec defines the desired state of OrganizationIAMMember + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + organizationId: + description: ID of the organization to attach a policy to. + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + organizationId: + description: ID of the organization to attach a policy to. + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.member is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.member) + || (has(self.initProvider) && has(self.initProvider.member))' + - message: spec.forProvider.organizationId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationId) + || (has(self.initProvider) && has(self.initProvider.organizationId))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: OrganizationIAMMemberStatus defines the observed state of + OrganizationIAMMember. + properties: + atProvider: + properties: + id: + type: string + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + organizationId: + description: ID of the organization to attach a policy to. + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
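
The same validation pattern applies to this resource: member, organizationId, and role must be set in forProvider or initProvider when Create or Update is in effect. A hypothetical minimal manifest, with all values as placeholders:

apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: OrganizationIAMMember
metadata:
  name: example-iam-member
spec:
  forProvider:
    # Hypothetical placeholders, for illustration only.
    organizationId: bpfexampleorg00000000
    role: editor
    member: "userAccount:ajeexampleuser0000000"
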
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+              observedGeneration:
+                description: |-
+                  ObservedGeneration is the latest metadata.generation
+                  which resulted in either a ready state, or stalled due to an error
+                  it cannot recover from without human intervention.
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_osloginsettings.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_osloginsettings.yaml
new file mode 100644
index 0000000..383f598
--- /dev/null
+++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_osloginsettings.yaml
@@ -0,0 +1,412 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: osloginsettings.organizationmanager.yandex-cloud.upjet.crossplane.io
+spec:
+  group: organizationmanager.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: OsLoginSettings
+    listKind: OsLoginSettingsList
+    plural: osloginsettings
+    singular: osloginsettings
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: OsLoginSettings is the Schema for the OsLoginSettings API.
+          Allows management of OsLogin Settings within an existing Yandex.Cloud
+          Organization.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: OsLoginSettingsSpec defines the desired state of OsLoginSettings
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  organizationId:
+                    description: The organization to manage its OsLogin Settings.
+                    type: string
+                  sshCertificateSettings:
+                    description: The structure is documented below.
+                    items:
+                      properties:
+                        enabled:
+                          description: Enables or disables usage of ssh certificates
+                            signed by a trusted CA.
+                          type: boolean
+                      type: object
+                    type: array
+                  userSshKeySettings:
+                    description: The structure is documented below.
+                    items:
+                      properties:
+                        allowManageOwnKeys:
+                          description: If set to true, the subject is allowed to
+                            manage their own ssh keys without having to be assigned
+                            specific permissions.
+                          type: boolean
+                        enabled:
+                          description: Enables or disables usage of ssh keys assigned
+                            to a specific subject.
+                          type: boolean
+                      type: object
+                    type: array
+                type: object
+              initProvider:
+                description: |-
+                  THIS IS A BETA FIELD. It will be honored
+                  unless the Management Policies feature flag is disabled.
+                  InitProvider holds the same fields as ForProvider, with the exception
+                  of Identifier and other resource reference fields. The fields that are
+                  in InitProvider are merged into ForProvider when the resource is created.
+                  The same fields are also added to the terraform ignore_changes hook, to
+                  avoid updating them after creation. This is useful for fields that are
+                  required on creation, but we do not desire to update them after creation,
+                  for example because an external controller is managing them, like an
+                  autoscaler.
+                properties:
+                  organizationId:
+                    description: The organization to manage its OsLogin Settings.
+                    type: string
+                  sshCertificateSettings:
+                    description: The structure is documented below.
+                    items:
+                      properties:
+                        enabled:
+                          description: Enables or disables usage of ssh certificates
+                            signed by a trusted CA.
+                          type: boolean
+                      type: object
+                    type: array
+                  userSshKeySettings:
+                    description: The structure is documented below.
+                    items:
+                      properties:
+                        allowManageOwnKeys:
+                          description: If set to true, the subject is allowed to
+                            manage their own ssh keys without having to be assigned
+                            specific permissions.
+                          type: boolean
+                        enabled:
+                          description: Enables or disables usage of ssh keys assigned
+                            to a specific subject.
+                          type: boolean
+                      type: object
+                    type: array
+                type: object
+              managementPolicies:
+                default:
+                - '*'
+                description: |-
+                  THIS IS A BETA FIELD. It is on by default but can be opted out
+                  through a Crossplane feature flag.
+                  ManagementPolicies specify the array of actions Crossplane is allowed to
+                  take on the managed and external resources.
+                  This field is planned to replace the DeletionPolicy field in a future
+                  release. Currently, both could be set independently and non-default
+                  values would be honored if the feature flag is enabled. If both are
+                  custom, the DeletionPolicy field will be ignored.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                  and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+                items:
+                  description: |-
+                    A ManagementAction represents an action that the Crossplane controllers
+                    can take on an external resource.
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+                        type: string
+                    required:
+                    - name
+                    type: object
+              writeConnectionSecretToRef:
+                description: |-
+                  WriteConnectionSecretToReference specifies the namespace and name of a
+                  Secret to which any connection details for this managed resource should
+                  be written. Connection details frequently include the endpoint, username,
+                  and password required to connect to the managed resource.
+                  This field is planned to be replaced in a future release in favor of
+                  PublishConnectionDetailsTo. Currently, both could be set independently
+                  and connection details would be published to both without affecting
+                  each other.
+                properties:
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - name
+                - namespace
+                type: object
+            required:
+            - forProvider
+            type: object
+            x-kubernetes-validations:
+            - message: spec.forProvider.organizationId is a required parameter
+              rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+                || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationId)
+                || (has(self.initProvider) && has(self.initProvider.organizationId))'
+          status:
+            description: OsLoginSettingsStatus defines the observed state of OsLoginSettings.
+            properties:
+              atProvider:
+                properties:
+                  id:
+                    type: string
+                  organizationId:
+                    description: The organization to manage its OsLogin Settings.
+                    type: string
+                  sshCertificateSettings:
+                    description: The structure is documented below.
+                    items:
+                      properties:
+                        enabled:
+                          description: Enables or disables usage of ssh certificates
+                            signed by a trusted CA.
+                          type: boolean
+                      type: object
+                    type: array
+                  userSshKeySettings:
+                    description: The structure is documented below.
+                    items:
+                      properties:
+                        allowManageOwnKeys:
+                          description: If set to true, the subject is allowed to
+                            manage their own ssh keys without having to be assigned
+                            specific permissions.
+                          type: boolean
+                        enabled:
+                          description: Enables or disables usage of ssh keys assigned
+                            to a specific subject.
+                          type: boolean
+                      type: object
+                    type: array
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      type: integer
+                    reason:
+                      description: A Reason for this condition's last transition
+                        from one status to another.
+                      type: string
+                    status:
+                      description: Status of this condition; is it currently True,
+                        False, or Unknown?
+                      type: string
+                    type:
+                      description: |-
+                        Type of this condition. At most one of each condition type may apply to
+                        a resource at any point in time.
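
For OsLoginSettings only organizationId is validated as required; the two settings blocks are optional arrays of single-key objects. A minimal sketch with placeholder values:

apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: OsLoginSettings
metadata:
  name: example-oslogin-settings
spec:
  forProvider:
    organizationId: bpfexampleorg00000000  # hypothetical organization ID
    sshCertificateSettings:
      - enabled: true
    userSshKeySettings:
      - enabled: true
        allowManageOwnKeys: true
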
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_samlfederations.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_samlfederations.yaml new file mode 100644 index 0000000..4c0da7b --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_samlfederations.yaml @@ -0,0 +1,521 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: samlfederations.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SAMLFederation + listKind: SAMLFederationList + plural: samlfederations + singular: samlfederation + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SAMLFederation is the Schema for the SAMLFederations API. Allows + management of a single SAML Federation within an existing Yandex.Cloud Organization. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SAMLFederationSpec defines the desired state of SAMLFederation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoCreateAccountOnLogin: + description: Add new users automatically on successful authentication. + The user will get the resource-manager.clouds.member role automatically, + but you need to grant other roles to them. If the value is false, + users who aren't added to the cloud can't log in, even if they + have authenticated on your server. + type: boolean + caseInsensitiveNameIds: + description: Use case-insensitive name ids. + type: boolean + cookieMaxAge: + description: The lifetime of a Browser cookie in seconds. If the + cookie is still valid, the management console authenticates + the user immediately and redirects them to the home page. The + default value is 8h. + type: string + description: + description: The description of the SAML Federation. + type: string + issuer: + description: The ID of the IdP server to be used for authentication. + The IdP server also responds to IAM with this ID after the user + authenticates. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs assigned to the SAML + Federation. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the SAML Federation. + type: string + organizationId: + description: The organization to attach this SAML Federation to. + type: string + securitySettings: + description: Federation security settings, structure is documented + below. + items: + properties: + encryptedAssertions: + description: Enable encrypted assertions. + type: boolean + type: object + type: array + ssoBinding: + description: Single sign-on endpoint binding type. Most Identity + Providers support the POST binding type. SAML Binding is a mapping + of a SAML protocol message onto standard messaging formats and/or + communications protocols. + type: string + ssoUrl: + description: Single sign-on endpoint URL. Specify the link to + the IdP login page here. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoCreateAccountOnLogin: + description: Add new users automatically on successful authentication. + The user will get the resource-manager.clouds.member role automatically, + but you need to grant other roles to them. If the value is false, + users who aren't added to the cloud can't log in, even if they + have authenticated on your server. + type: boolean + caseInsensitiveNameIds: + description: Use case-insensitive name ids. + type: boolean + cookieMaxAge: + description: The lifetime of a Browser cookie in seconds. If the + cookie is still valid, the management console authenticates + the user immediately and redirects them to the home page. 
The + default value is 8h. + type: string + description: + description: The description of the SAML Federation. + type: string + issuer: + description: The ID of the IdP server to be used for authentication. + The IdP server also responds to IAM with this ID after the user + authenticates. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs assigned to the SAML + Federation. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the SAML Federation. + type: string + organizationId: + description: The organization to attach this SAML Federation to. + type: string + securitySettings: + description: Federation security settings, structure is documented + below. + items: + properties: + encryptedAssertions: + description: Enable encrypted assertions. + type: boolean + type: object + type: array + ssoBinding: + description: Single sign-on endpoint binding type. Most Identity + Providers support the POST binding type. SAML Binding is a mapping + of a SAML protocol message onto standard messaging formats and/or + communications protocols. + type: string + ssoUrl: + description: Single sign-on endpoint URL. Specify the link to + the IdP login page here. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
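
A hypothetical SAMLFederation manifest satisfying the required-parameter validations that follow (issuer, name, organizationId, ssoBinding, ssoUrl); the URLs are placeholders, and the POST binding value is an assumption based on the ssoBinding description above:

apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: SAMLFederation
metadata:
  name: example-saml-federation
spec:
  forProvider:
    organizationId: bpfexampleorg00000000     # hypothetical organization ID
    name: example-saml-federation
    issuer: https://idp.example.com           # hypothetical IdP entity ID
    ssoBinding: POST                          # assumed value; see the ssoBinding description
    ssoUrl: https://idp.example.com/saml/sso  # hypothetical login page URL
    cookieMaxAge: 8h                          # matches the documented default
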
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.issuer is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.issuer) + || (has(self.initProvider) && has(self.initProvider.issuer))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.organizationId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationId) + || (has(self.initProvider) && has(self.initProvider.organizationId))' + - message: spec.forProvider.ssoBinding is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ssoBinding) + || (has(self.initProvider) && has(self.initProvider.ssoBinding))' + - message: spec.forProvider.ssoUrl is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ssoUrl) + || (has(self.initProvider) && has(self.initProvider.ssoUrl))' + status: + description: SAMLFederationStatus defines the observed state of SAMLFederation. + properties: + atProvider: + properties: + autoCreateAccountOnLogin: + description: Add new users automatically on successful authentication. + The user will get the resource-manager.clouds.member role automatically, + but you need to grant other roles to them. If the value is false, + users who aren't added to the cloud can't log in, even if they + have authenticated on your server. + type: boolean + caseInsensitiveNameIds: + description: Use case-insensitive name ids. + type: boolean + cookieMaxAge: + description: The lifetime of a Browser cookie in seconds. If the + cookie is still valid, the management console authenticates + the user immediately and redirects them to the home page. The + default value is 8h. + type: string + createdAt: + description: (Computed) The SAML Federation creation timestamp. + type: string + description: + description: The description of the SAML Federation. + type: string + id: + type: string + issuer: + description: The ID of the IdP server to be used for authentication. + The IdP server also responds to IAM with this ID after the user + authenticates. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs assigned to the SAML + Federation. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the SAML Federation. + type: string + organizationId: + description: The organization to attach this SAML Federation to. + type: string + securitySettings: + description: Federation security settings, structure is documented + below. + items: + properties: + encryptedAssertions: + description: Enable encrypted assertions. + type: boolean + type: object + type: array + ssoBinding: + description: Single sign-on endpoint binding type. Most Identity + Providers support the POST binding type. 
SAML Binding is a mapping + of a SAML protocol message onto standard messaging formats and/or + communications protocols. + type: string + ssoUrl: + description: Single sign-on endpoint URL. Specify the link to + the IdP login page here. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_samlfederationuseraccounts.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_samlfederationuseraccounts.yaml new file mode 100644 index 0000000..77e62d2 --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_samlfederationuseraccounts.yaml @@ -0,0 +1,356 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: samlfederationuseraccounts.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SAMLFederationUserAccount + listKind: SAMLFederationUserAccountList + plural: samlfederationuseraccounts + singular: samlfederationuseraccount + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SAMLFederationUserAccount is the Schema for the SAMLFederationUserAccounts + API. 
Allows management of a single SAML Federation user account within an + existing Yandex.Cloud Organization. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SAMLFederationUserAccountSpec defines the desired state of + SAMLFederationUserAccount + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + federationId: + description: ID of a SAML Federation. + type: string + nameId: + description: Name ID of the SAML federated user. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + federationId: + description: ID of a SAML Federation. + type: string + nameId: + description: Name ID of the SAML federated user. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.federationId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.federationId) + || (has(self.initProvider) && has(self.initProvider.federationId))' + - message: spec.forProvider.nameId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nameId) + || (has(self.initProvider) && has(self.initProvider.nameId))' + status: + description: SAMLFederationUserAccountStatus defines the observed state + of SAMLFederationUserAccount. + properties: + atProvider: + properties: + federationId: + description: ID of a SAML Federation. + type: string + id: + type: string + nameId: + description: Name ID of the SAML federated user. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_usersshkeys.yaml b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_usersshkeys.yaml new file mode 100644 index 0000000..c7494ac --- /dev/null +++ b/package/crds/organizationmanager.yandex-cloud.upjet.crossplane.io_usersshkeys.yaml @@ -0,0 +1,391 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: usersshkeys.organizationmanager.yandex-cloud.upjet.crossplane.io +spec: + group: organizationmanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: UserSSHKey + listKind: UserSSHKeyList + plural: usersshkeys + singular: usersshkey + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: UserSSHKey is the Schema for the UserSSHKeys API. Allows management + of User Ssh Keys within an existing Yandex.Cloud Organization and Subject. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSSHKeySpec defines the desired state of UserSSHKey + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + data: + description: Data of the user ssh key. 
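For orientation, a minimal SAMLFederationUserAccount manifest against the schema defined above might look like the following sketch. The API group is assumed to be the same organizationmanager group used by the neighbouring CRDs in this change, and all IDs are placeholders:

apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1  # assumed group
kind: SAMLFederationUserAccount
metadata:
  name: example-federated-user
spec:
  forProvider:
    # Both fields are enforced by the x-kubernetes-validations rules above
    # whenever managementPolicies include '*', Create, or Update.
    federationId: <federation-id>    # placeholder federation ID
    nameId: user@example.com         # placeholder Name ID of the federated user
  providerConfigRef:
    name: default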
+ type: string + expiresAt: + description: User ssh key will be no longer valid after expiration + timestamp. + type: string + name: + description: Name of the user ssh key. + type: string + organizationId: + description: Organization that the user ssh key belongs to. + type: string + subjectId: + description: Subject that the user ssh key belongs to. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + data: + description: Data of the user ssh key. + type: string + expiresAt: + description: User ssh key will be no longer valid after expiration + timestamp. + type: string + name: + description: Name of the user ssh key. + type: string + organizationId: + description: Organization that the user ssh key belongs to. + type: string + subjectId: + description: Subject that the user ssh key belongs to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.data is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.data) + || (has(self.initProvider) && has(self.initProvider.data))' + - message: spec.forProvider.organizationId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationId) + || (has(self.initProvider) && has(self.initProvider.organizationId))' + - message: spec.forProvider.subjectId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subjectId) + || (has(self.initProvider) && has(self.initProvider.subjectId))' + status: + description: UserSSHKeyStatus defines the observed state of UserSSHKey. + properties: + atProvider: + properties: + createdAt: + type: string + data: + description: Data of the user ssh key. + type: string + expiresAt: + description: User ssh key will be no longer valid after expiration + timestamp. + type: string + fingerprint: + type: string + id: + type: string + name: + description: Name of the user ssh key. + type: string + organizationId: + description: Organization that the user ssh key belongs to. + type: string + subjectId: + description: Subject that the user ssh key belongs to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_cloudiambindings.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_cloudiambindings.yaml new file mode 100644 index 0000000..12327ed --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_cloudiambindings.yaml @@ -0,0 +1,534 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: cloudiambindings.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CloudIAMBinding + listKind: CloudIAMBindingList + plural: cloudiambindings + singular: cloudiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CloudIAMBinding is the Schema for the CloudIAMBindings API. Allows + management of a single IAM binding for a Yandex Resource Manager cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudIAMBindingSpec defines the desired state of CloudIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudId: + description: ID of the cloud to attach the policy to. + type: string + cloudIdRef: + description: Reference to a Cloud to populate cloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
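A minimal UserSSHKey manifest exercising the schema above could look like the following sketch; the key material and IDs are placeholders. Per the CEL rules above, data, organizationId, and subjectId must be set whenever the management policies allow Create or Update:

apiVersion: organizationmanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: UserSSHKey
metadata:
  name: example-user-ssh-key
spec:
  forProvider:
    organizationId: <organization-id>         # placeholder
    subjectId: <subject-id>                   # placeholder
    data: ssh-ed25519 AAAAC3Nza... user@host  # placeholder public key material
    name: example-key                         # optional display name
    expiresAt: "2030-01-01T00:00:00Z"         # optional; key is no longer valid after this timestamp
  providerConfigRef:
    name: default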
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudIdSelector: + description: Selector for a Cloud to populate cloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cloudId: + description: ID of the cloud to attach the policy to. + type: string + cloudIdRef: + description: Reference to a Cloud to populate cloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudIdSelector: + description: Selector for a Cloud to populate cloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: CloudIAMBindingStatus defines the observed state of CloudIAMBinding. + properties: + atProvider: + properties: + cloudId: + description: ID of the cloud to attach the policy to. + type: string + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege in the role. Each entry can have one of the following + values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_resourcemanager_cloud_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_cloudiammembers.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_cloudiammembers.yaml new file mode 100644 index 0000000..2a7fec9 --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_cloudiammembers.yaml @@ -0,0 +1,523 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: cloudiammembers.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CloudIAMMember + listKind: CloudIAMMemberList + plural: cloudiammembers + singular: cloudiammember + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CloudIAMMember is the Schema for the CloudIAMMembers API. Allows + management of a single member for a single IAM binding on a Yandex Resource + Manager cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudIAMMemberSpec defines the desired state of CloudIAMMember + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudId: + description: ID of the cloud to attach a policy to. + type: string + cloudIdRef: + description: Reference to a Cloud to populate cloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
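As a usage sketch for the CloudIAMBinding schema above (the role and member identities are placeholders; the member string format is assumed to follow the userAccount:/serviceAccount: convention of the underlying Terraform resource):

apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CloudIAMBinding
metadata:
  name: example-cloud-iam-binding
spec:
  forProvider:
    # cloudId can be set directly, or resolved through cloudIdRef/cloudIdSelector.
    cloudIdRef:
      name: example-cloud        # hypothetical Cloud object in the same cluster
    role: viewer                 # placeholder; only one binding may be used per role
    members:
      - userAccount:<user-id>    # placeholder identity
  providerConfigRef:
    name: default

Note that the validation rules above require role and members only when managementPolicies grant Create or Update (or '*'), so an Observe-only object may legitimately omit them.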
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudIdSelector: + description: Selector for a Cloud to populate cloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cloudId: + description: ID of the cloud to attach a policy to. + type: string + cloudIdRef: + description: Reference to a Cloud to populate cloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudIdSelector: + description: Selector for a Cloud to populate cloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.member is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.member) + || (has(self.initProvider) && has(self.initProvider.member))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: CloudIAMMemberStatus defines the observed state of CloudIAMMember. + properties: + atProvider: + properties: + cloudId: + description: ID of the cloud to attach a policy to. + type: string + id: + type: string + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_clouds.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_clouds.yaml new file mode 100644 index 0000000..0f397cf --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_clouds.yaml @@ -0,0 +1,379 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clouds.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Cloud + listKind: CloudList + plural: clouds + singular: cloud + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cloud is the Schema for the Clouds API. Allows management of + the Cloud resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudSpec defines the desired state of Cloud + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the Cloud. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Cloud. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Cloud. + type: string + organizationId: + description: Yandex.Cloud Organization that the cloud belongs + to. If value is omitted, the default provider Organization ID + is used. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
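A companion sketch for CloudIAMMember, which grants a single identity a single role. Here the cloud is resolved via the selector, assuming the Cloud object shares a controller reference with this object (for example, within the same composition):

apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: CloudIAMMember
metadata:
  name: example-cloud-iam-member
spec:
  forProvider:
    cloudIdSelector:
      matchControllerRef: true       # select the Cloud with the same controller reference
    role: editor                     # placeholder role
    member: serviceAccount:<sa-id>   # placeholder single identity
  providerConfigRef:
    name: default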
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the Cloud. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Cloud. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Cloud. + type: string + organizationId: + description: Yandex.Cloud Organization that the cloud belongs + to. If value is omitted, the default provider Organization ID + is used. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CloudStatus defines the observed state of Cloud. + properties: + atProvider: + properties: + createdAt: + type: string + description: + description: A description of the Cloud. 
+ type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Cloud. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Cloud. + type: string + organizationId: + description: Yandex.Cloud Organization that the cloud belongs + to. If value is omitted, the default provider Organization ID + is used. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiambindings.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiambindings.yaml new file mode 100644 index 0000000..5e9751d --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiambindings.yaml @@ -0,0 +1,535 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: folderiambindings.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: FolderIAMBinding + listKind: FolderIAMBindingList + plural: folderiambindings + singular: folderiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: FolderIAMBinding is the Schema for the FolderIAMBindings API. 
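With the Cloud CRD in place, a minimal manifest against it might look like the sketch below. Every identifier and label value is a hypothetical placeholder; the field names come straight from the spec.forProvider schema above, and providerConfigRef simply names the default config the CRD already declares.

```yaml
# Hypothetical sketch: a Cloud managed resource against the
# resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 schema above.
apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Cloud
metadata:
  name: example-cloud
spec:
  forProvider:
    name: example-cloud
    description: Cloud managed by Crossplane
    # Placeholder organization ID; omit to fall back to the
    # provider's default Organization ID, per the schema description.
    organizationId: bpf00000000000000000
    labels:
      environment: dev
  providerConfigRef:
    name: default
```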
+ Allows management of a single IAM binding for a Yandex Resource Manager + folder. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FolderIAMBindingSpec defines the desired state of FolderIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + folderId: + description: ID of the folder to attach a policy to. + type: string + folderIdRef: + description: Reference to a Folder to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'An array of identities that will be granted the + privilege that is specified in the role field. Each entry can + have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + folderId: + description: ID of the folder to attach a policy to. + type: string + folderIdRef: + description: Reference to a Folder to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'An array of identities that will be granted the + privilege that is specified in the role field. Each entry can + have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: FolderIAMBindingStatus defines the observed state of FolderIAMBinding. + properties: + atProvider: + properties: + folderId: + description: ID of the folder to attach a policy to. 
+ type: string + id: + type: string + members: + description: 'An array of identities that will be granted the + privilege that is specified in the role field. Each entry can + have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be assigned. Only one yandex_resourcemanager_folder_iam_binding + can be used per role. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiammembers.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiammembers.yaml new file mode 100644 index 0000000..a4a67fe --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiammembers.yaml @@ -0,0 +1,523 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: folderiammembers.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: FolderIAMMember + listKind: FolderIAMMemberList + plural: folderiammembers + singular: folderiammember + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: FolderIAMMember is the Schema for the FolderIAMMembers API. 
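The FolderIAMBinding schema above could be exercised with a sketch like the following. The folder reference and member ID are placeholders, and the serviceAccount:&lt;id&gt; member form is the usual Yandex convention rather than something this CRD itself enforces; folderIdRef resolves spec.forProvider.folderId from a Folder managed in the same cluster, per the folderIdRef schema.

```yaml
# Hypothetical sketch: grant the viewer role on a folder managed in-cluster.
apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: FolderIAMBinding
metadata:
  name: example-folder-viewers
spec:
  forProvider:
    folderIdRef:
      name: example-folder   # resolves folderId from a Folder resource
    role: viewer
    members:
      # Placeholder principal in the usual serviceAccount:<id> form.
      - serviceAccount:aje00000000000000000
  providerConfigRef:
    name: default
```

Note that the x-kubernetes-validations rules above reject a binding that omits role or members whenever the management policies include '*', Create, or Update, so both fields must be set in either forProvider or initProvider.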
Allows + management of a single member for a single IAM binding for a Yandex Resource + Manager folder. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FolderIAMMemberSpec defines the desired state of FolderIAMMember + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + folderId: + description: ID of the folder to attach a policy to. + type: string + folderIdRef: + description: Reference to a Folder to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + folderId: + description: ID of the folder to attach a policy to. + type: string + folderIdRef: + description: Reference to a Folder to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.member is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.member) + || (has(self.initProvider) && has(self.initProvider.member))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: FolderIAMMemberStatus defines the observed state of FolderIAMMember. + properties: + atProvider: + properties: + folderId: + description: ID of the folder to attach a policy to. + type: string + id: + type: string + member: + description: 'The identity that will be granted the privilege + that is specified in the role field. This field can have one + of the following values:' + type: string + role: + description: The role that should be assigned. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiampolicies.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiampolicies.yaml new file mode 100644 index 0000000..43eb3da --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folderiampolicies.yaml @@ -0,0 +1,503 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: folderiampolicies.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: FolderIAMPolicy + listKind: FolderIAMPolicyList + plural: folderiampolicies + singular: folderiampolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: FolderIAMPolicy is the Schema for the FolderIAMPolicys API. Allows + management of the IAM policy for a Yandex Resource Manager folder. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
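FolderIAMMember is the single-principal counterpart of FolderIAMBinding, and a sketch against its schema is correspondingly narrower; here folderId is set directly instead of via a reference, and both IDs are placeholders.

```yaml
# Hypothetical sketch: grant one role to one principal on a folder.
apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: FolderIAMMember
metadata:
  name: example-folder-editor
spec:
  forProvider:
    folderId: b1g00000000000000000          # placeholder folder ID
    role: editor
    member: userAccount:aje00000000000000001  # placeholder principal
  providerConfigRef:
    name: default
```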
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FolderIAMPolicySpec defines the desired state of FolderIAMPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + folderId: + description: ID of the folder that the policy is attached to. + type: string + folderIdRef: + description: Reference to a Folder to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyData: + description: The yandex_iam_policy data source that represents + the IAM policy that will be applied to the folder. This policy + overrides any existing policy applied to the folder. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + folderId: + description: ID of the folder that the policy is attached to. + type: string + folderIdRef: + description: Reference to a Folder to populate folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder to populate folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyData: + description: The yandex_iam_policy data source that represents + the IAM policy that will be applied to the folder. This policy + overrides any existing policy applied to the folder. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.policyData is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyData) + || (has(self.initProvider) && has(self.initProvider.policyData))' + status: + description: FolderIAMPolicyStatus defines the observed state of FolderIAMPolicy. + properties: + atProvider: + properties: + folderId: + description: ID of the folder that the policy is attached to. + type: string + id: + type: string + policyData: + description: The yandex_iam_policy data source that represents + the IAM policy that will be applied to the folder. This policy + overrides any existing policy applied to the folder. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folders.yaml b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folders.yaml new file mode 100644 index 0000000..8633d2a --- /dev/null +++ b/package/crds/resourcemanager.yandex-cloud.upjet.crossplane.io_folders.yaml @@ -0,0 +1,524 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: folders.resourcemanager.yandex-cloud.upjet.crossplane.io +spec: + group: resourcemanager.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Folder + listKind: FolderList + plural: folders + singular: folder + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Folder is the Schema for the Folders API. Allows management of + the Cloud Folder. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FolderSpec defines the desired state of Folder + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudId: + description: Cloud that the resource belongs to. If value is omitted, + the default provider Cloud ID is used. 
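The FolderIAMPolicy schema above is authoritative: per its policyData description, the applied policy overrides whatever already exists on the folder. A sketch follows, assuming policyData accepts the serialized JSON that Terraform's yandex_iam_policy data source would otherwise produce; that shape, and all IDs, are assumptions for illustration.

```yaml
# Hypothetical sketch: an authoritative IAM policy for one folder.
# Caution: this replaces any existing policy on the folder.
apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: FolderIAMPolicy
metadata:
  name: example-folder-policy
spec:
  forProvider:
    folderIdRef:
      name: example-folder
    # Assumed shape: bindings of role -> members, serialized as JSON.
    policyData: |
      {"bindings":[{"role":"viewer","members":["serviceAccount:aje00000000000000000"]}]}
  providerConfigRef:
    name: default
```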
+ type: string + cloudIdRef: + description: Reference to a Cloud to populate cloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudIdSelector: + description: Selector for a Cloud to populate cloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: A description of the Folder. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Folder. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Folder. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cloudId: + description: Cloud that the resource belongs to. If value is omitted, + the default provider Cloud ID is used. + type: string + cloudIdRef: + description: Reference to a Cloud to populate cloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
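+              # Illustrative note (not part of the generated schema): instead
+              # of a literal cloudId, the parent Cloud can be referenced by the
+              # name of a managed Cloud object (cloudIdRef) or selected by
+              # labels (cloudIdSelector); only one is normally set. With
+              # hypothetical names:
+              #   cloudIdRef:
+              #     name: my-cloud
+              #   cloudIdSelector:
+              #     matchLabels: {env: prod}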
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudIdSelector: + description: Selector for a Cloud to populate cloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: A description of the Folder. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Folder. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Folder. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
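+          # Illustrative note (not part of the generated schema): an
+          # observe-only Folder would set
+          #   managementPolicies: ["Observe"]
+          # while the default ["*"] permits every action in the enum below.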
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
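+              # Illustrative note (not part of the generated schema): a minimal
+              # sketch, with hypothetical names; a Folder may well expose no
+              # connection details, in which case nothing is written:
+              #   publishConnectionDetailsTo:
+              #     name: folder-conn
+              #     configRef: {name: default}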
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: FolderStatus defines the observed state of Folder. + properties: + atProvider: + properties: + cloudId: + description: Cloud that the resource belongs to. If value is omitted, + the default provider Cloud ID is used. + type: string + createdAt: + type: string + description: + description: A description of the Folder. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Folder. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Folder. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
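+        # Illustrative note (not part of the generated schema): comparing
+        # status.observedGeneration with metadata.generation is a quick
+        # staleness check; if generation is ahead, the last successful
+        # reconcile has not yet seen the current spec.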
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containeriambindings.yaml b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containeriambindings.yaml new file mode 100644 index 0000000..1c764c9 --- /dev/null +++ b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containeriambindings.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: containeriambindings.serverless.yandex-cloud.upjet.crossplane.io +spec: + group: serverless.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ContainerIAMBinding + listKind: ContainerIAMBindingList + plural: containeriambindings + singular: containeriambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ContainerIAMBinding is the Schema for the ContainerIAMBindings + API. Allows management of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerIAMBindingSpec defines the desired state of ContainerIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerId: + description: The Yandex Serverless Container ID to apply a binding + to. + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. 
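+              # Illustrative note (not part of the generated schema): a typical
+              # binding sketch with hypothetical IDs; member strings follow
+              # Yandex IAM conventions such as "serviceAccount:<id>":
+              #   spec:
+              #     forProvider:
+              #       containerId: bba1examplecontainer
+              #       role: serverless.containers.invoker
+              #       members:
+              #         - serviceAccount:ajeexampleaccount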
+ type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerId: + description: The Yandex Serverless Container ID to apply a binding + to. + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
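+                    # Illustrative note (not part of the generated schema): the
+                    # default pairing is {resolution: Required, resolve:
+                    # IfNotPresent}; resolve: Always re-resolves the reference
+                    # on every reconcile at the cost of extra lookups.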
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
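+              # Illustrative note (not part of the generated schema): unlike
+              # publishConnectionDetailsTo, this legacy field needs an explicit
+              # namespace because the managed resource itself is
+              # cluster-scoped, e.g. (hypothetical values):
+              #   writeConnectionSecretToRef:
+              #     name: binding-conn
+              #     namespace: crossplane-system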
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.containerId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.containerId) + || (has(self.initProvider) && has(self.initProvider.containerId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ContainerIAMBindingStatus defines the observed state of ContainerIAMBinding. + properties: + atProvider: + properties: + containerId: + description: The Yandex Serverless Container ID to apply a binding + to. + type: string + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containers.yaml b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containers.yaml new file mode 100644 index 0000000..b785f37 --- /dev/null +++ b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containers.yaml @@ -0,0 +1,1719 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: containers.serverless.yandex-cloud.upjet.crossplane.io +spec: + group: serverless.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Container + listKind: ContainerList + plural: containers + singular: container + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Container is the Schema for the Containers API. Allows management + of a Yandex Cloud Serverless Container. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerSpec defines the desired state of Container + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + concurrency: + description: Concurrency of Yandex Cloud Serverless Container + type: number + connectivity: + description: Network access. 
If specified the revision will be + attached to specified network + items: + properties: + networkId: + description: Network the revision will have access to + type: string + type: object + type: array + coreFraction: + description: Core fraction (0...100) of the Yandex Cloud Serverless + Container + type: number + cores: + type: number + description: + description: Description of the Yandex Cloud Serverless Container + type: string + executionTimeout: + description: Execution timeout in seconds (duration format) for + Yandex Cloud Serverless Container + type: string + folderId: + description: Folder ID for the Yandex Cloud Serverless Container + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + image: + description: Revision deployment image for Yandex Cloud Serverless + Container + items: + properties: + args: + description: List of arguments for Yandex Cloud Serverless + Container + items: + type: string + type: array + command: + description: List of commands for Yandex Cloud Serverless + Container + items: + type: string + type: array + digest: + description: Digest of image that will be deployed as Yandex + Cloud Serverless Container. If presented, should be equal + to digest that will be resolved at server side by URL. + Container will be updated on digest change even if image.0.url + stays the same. If field not specified then its value + will be computed. 
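+                      # Illustrative note (not part of the generated schema):
+                      # pinning a revision by digest, with placeholder values:
+                      #   image:
+                      #     - url: cr.yandex/crp1example/app:latest
+                      #       digest: sha256:<resolved-digest>
+                      # Per the description above, a digest change triggers a
+                      # new revision even when the url stays the same.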
+ type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variable pairs + for Yandex Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + url: + description: Invoke URL for the Yandex Cloud Serverless + Container + type: string + workDir: + description: Working directory for Yandex Cloud Serverless + Container + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Serverless + Container + items: + properties: + disabled: + description: Is logging from container disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: |- + Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + Container memory in megabytes, should be aligned to 128 + type: number + mounts: + description: Mounts for Yandex Cloud Serverless Container + items: + properties: + ephemeralDisk: + description: One of the available mount types. Disk available + during the function execution time + items: + properties: + blockSizeKb: + description: Optional block size of the ephemeral + disk in KB + type: number + sizeGb: + description: Size of the ephemeral disk in GB + type: number + type: object + type: array + mode: + description: Mount’s accessibility mode. Valid values are + ro and rw + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted + type: string + objectStorage: + description: One of the available mount types. Object storage + as a mount + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + bucketRef: + description: Reference to a Bucket in storage to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
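+                          # Illustrative note (not part of the generated
+                          # schema): a read-only Object Storage mount sketch
+                          # (hypothetical bucket name):
+                          #   mounts:
+                          #     - mountPointPath: /data
+                          #       mode: ro
+                          #       objectStorage:
+                          #         - bucket: my-bucket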
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + prefix: + description: Prefix within the bucket. If you leave + this field empty, the entire bucket will be mounted + type: string + type: object + type: array + type: object + type: array + name: + description: Yandex Cloud Serverless Container name + type: string + provisionPolicy: + description: Provision policy. If specified the revision will + have prepared instances + items: + properties: + minInstances: + description: Minimum number of prepared instances that are + always ready to serve requests + type: number + type: object + type: array + secrets: + description: Secrets for Yandex Cloud Serverless Container + items: + properties: + environmentVariable: + description: Container's environment variable in which secret's + value will be stored + type: string + id: + description: Secret's id + type: string + idRef: + description: Reference to a Secret in lockbox to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Secret in lockbox to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Secret's entries key which value will be stored + in environment variable + type: string + versionId: + description: Secret's version id + type: string + versionIdRef: + description: Reference to a SecretVersion in lockbox to + populate versionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionIdSelector: + description: Selector for a SecretVersion in lockbox to + populate versionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Serverless Container + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageMounts: + description: (DEPRECATED, use mounts.0.object_storage instead) + Storage mounts for Yandex Cloud Serverless Container + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted + type: string + readOnly: + description: Mount the bucket in read-only mode + type: boolean + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + concurrency: + description: Concurrency of Yandex Cloud Serverless Container + type: number + connectivity: + description: Network access. 
If specified the revision will be + attached to specified network + items: + properties: + networkId: + description: Network the revision will have access to + type: string + type: object + type: array + coreFraction: + description: Core fraction (0...100) of the Yandex Cloud Serverless + Container + type: number + cores: + type: number + description: + description: Description of the Yandex Cloud Serverless Container + type: string + executionTimeout: + description: Execution timeout in seconds (duration format) for + Yandex Cloud Serverless Container + type: string + folderId: + description: Folder ID for the Yandex Cloud Serverless Container + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + image: + description: Revision deployment image for Yandex Cloud Serverless + Container + items: + properties: + args: + description: List of arguments for Yandex Cloud Serverless + Container + items: + type: string + type: array + command: + description: List of commands for Yandex Cloud Serverless + Container + items: + type: string + type: array + digest: + description: Digest of image that will be deployed as Yandex + Cloud Serverless Container. If presented, should be equal + to digest that will be resolved at server side by URL. + Container will be updated on digest change even if image.0.url + stays the same. If field not specified then its value + will be computed. 
+ type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variable pairs + for Yandex Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + url: + description: Invoke URL for the Yandex Cloud Serverless + Container + type: string + workDir: + description: Working directory for Yandex Cloud Serverless + Container + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Serverless + Container + items: + properties: + disabled: + description: Is logging from container disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: |- + Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + Container memory in megabytes, should be aligned to 128 + type: number + mounts: + description: Mounts for Yandex Cloud Serverless Container + items: + properties: + ephemeralDisk: + description: One of the available mount types. Disk available + during the function execution time + items: + properties: + blockSizeKb: + description: Optional block size of the ephemeral + disk in KB + type: number + sizeGb: + description: Size of the ephemeral disk in GB + type: number + type: object + type: array + mode: + description: Mount’s accessibility mode. Valid values are + ro and rw + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted + type: string + objectStorage: + description: One of the available mount types. Object storage + as a mount + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + bucketRef: + description: Reference to a Bucket in storage to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + prefix: + description: Prefix within the bucket. If you leave + this field empty, the entire bucket will be mounted + type: string + type: object + type: array + type: object + type: array + name: + description: Yandex Cloud Serverless Container name + type: string + provisionPolicy: + description: Provision policy. If specified the revision will + have prepared instances + items: + properties: + minInstances: + description: Minimum number of prepared instances that are + always ready to serve requests + type: number + type: object + type: array + secrets: + description: Secrets for Yandex Cloud Serverless Container + items: + properties: + environmentVariable: + description: Container's environment variable in which secret's + value will be stored + type: string + id: + description: Secret's id + type: string + idRef: + description: Reference to a Secret in lockbox to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Secret in lockbox to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Secret's entries key which value will be stored + in environment variable + type: string + versionId: + description: Secret's version id + type: string + versionIdRef: + description: Reference to a SecretVersion in lockbox to + populate versionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionIdSelector: + description: Selector for a SecretVersion in lockbox to + populate versionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Serverless Container + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageMounts: + description: (DEPRECATED, use mounts.0.object_storage instead) + Storage mounts for Yandex Cloud Serverless Container + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted + type: string + readOnly: + description: Mount the bucket in read-only mode + type: boolean + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
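The storageMounts block above is flagged deprecated in favor of mounts.0.object_storage. A minimal migration sketch, using a hypothetical bucket name and mount path (the old readOnly flag corresponds to mode: ro in the new block):

    # Deprecated form:
    storageMounts:
    - bucket: my-bucket            # hypothetical bucket name
      mountPointPath: /data
      readOnly: true
    # Equivalent mounts form:
    mounts:
    - mountPointPath: /data
      mode: ro                     # valid values per the schema: ro and rw
      objectStorage:
      - bucket: my-bucket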
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+              This field is planned to be replaced in a future release in favor of
+              PublishConnectionDetailsTo. Currently, both could be set independently
+              and connection details would be published to both without affecting
+              each other.
+            properties:
+              name:
+                description: Name of the secret.
+                type: string
+              namespace:
+                description: Namespace of the secret.
+                type: string
+            required:
+            - name
+            - namespace
+            type: object
+        required:
+        - forProvider
+        type: object
+        x-kubernetes-validations:
+        - message: spec.forProvider.image is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.image)
+            || (has(self.initProvider) && has(self.initProvider.image))'
+        - message: spec.forProvider.memory is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.memory)
+            || (has(self.initProvider) && has(self.initProvider.memory))'
+        - message: spec.forProvider.name is a required parameter
+          rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+            || ''Update'' in self.managementPolicies) || has(self.forProvider.name)
+            || (has(self.initProvider) && has(self.initProvider.name))'
+      status:
+        description: ContainerStatus defines the observed state of Container.
+        properties:
+          atProvider:
+            properties:
+              concurrency:
+                description: Concurrency of Yandex Cloud Serverless Container
+                type: number
+              connectivity:
+                description: Network access. If specified the revision will be
+                  attached to specified network
+                items:
+                  properties:
+                    networkId:
+                      description: Network the revision will have access to
+                      type: string
+                  type: object
+                type: array
+              coreFraction:
+                description: Core fraction (0...100) of the Yandex Cloud Serverless
+                  Container
+                type: number
+              cores:
+                type: number
+              createdAt:
+                description: Creation timestamp of the Yandex Cloud Serverless
+                  Container
+                type: string
+              description:
+                description: Description of the Yandex Cloud Serverless Container
+                type: string
+              executionTimeout:
+                description: Execution timeout in seconds (duration format) for
+                  Yandex Cloud Serverless Container
+                type: string
+              folderId:
+                description: Folder ID for the Yandex Cloud Serverless Container
+                type: string
+              id:
+                description: ID of the Yandex Cloud Serverless Container
+                type: string
+              image:
+                description: Revision deployment image for Yandex Cloud Serverless
+                  Container
+                items:
+                  properties:
+                    args:
+                      description: List of arguments for Yandex Cloud Serverless
+                        Container
+                      items:
+                        type: string
+                      type: array
+                    command:
+                      description: List of commands for Yandex Cloud Serverless
+                        Container
+                      items:
+                        type: string
+                      type: array
+                    digest:
+                      description: Digest of image that will be deployed as Yandex
+                        Cloud Serverless Container. If present, it should equal
+                        the digest that will be resolved server-side from the URL.
+                        The Container will be updated on digest change even if
+                        image.0.url stays the same. If the field is not specified,
+                        its value will be computed.
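The x-kubernetes-validations rules above are CEL expressions that make image, memory, and name required only when Crossplane is allowed to create or update the resource. Under an observe-only policy they may be omitted entirely, so a sketch like the following should pass admission (the API group is assumed from this provider's naming scheme, and the external-name value is hypothetical):

    apiVersion: serverless.yandex-cloud.upjet.crossplane.io/v1alpha1  # assumed group; use the group from the Container CRD earlier in this diff
    kind: Container
    metadata:
      name: existing-container
      annotations:
        crossplane.io/external-name: bba1234567890abcdef  # hypothetical ID of an already-existing container
    spec:
      managementPolicies: ["Observe"]
      forProvider: {}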
+ type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variable pairs + for Yandex Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + url: + description: Invoke URL for the Yandex Cloud Serverless + Container + type: string + workDir: + description: Working directory for Yandex Cloud Serverless + Container + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Serverless + Container + items: + properties: + disabled: + description: Is logging from container disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: |- + Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + Container memory in megabytes, should be aligned to 128 + type: number + mounts: + description: Mounts for Yandex Cloud Serverless Container + items: + properties: + ephemeralDisk: + description: One of the available mount types. Disk available + during the function execution time + items: + properties: + blockSizeKb: + description: Optional block size of the ephemeral + disk in KB + type: number + sizeGb: + description: Size of the ephemeral disk in GB + type: number + type: object + type: array + mode: + description: Mount’s accessibility mode. Valid values are + ro and rw + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted + type: string + objectStorage: + description: One of the available mount types. Object storage + as a mount + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + prefix: + description: Prefix within the bucket. If you leave + this field empty, the entire bucket will be mounted + type: string + type: object + type: array + type: object + type: array + name: + description: Yandex Cloud Serverless Container name + type: string + provisionPolicy: + description: Provision policy. 
If specified the revision will + have prepared instances + items: + properties: + minInstances: + description: Minimum number of prepared instances that are + always ready to serve requests + type: number + type: object + type: array + revisionId: + description: Last revision ID of the Yandex Cloud Serverless Container + type: string + secrets: + description: Secrets for Yandex Cloud Serverless Container + items: + properties: + environmentVariable: + description: Container's environment variable in which secret's + value will be stored + type: string + id: + description: Secret's id + type: string + key: + description: Secret's entries key which value will be stored + in environment variable + type: string + versionId: + description: Secret's version id + type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Serverless Container + type: string + storageMounts: + description: (DEPRECATED, use mounts.0.object_storage instead) + Storage mounts for Yandex Cloud Serverless Container + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted + type: string + readOnly: + description: Mount the bucket in read-only mode + type: boolean + type: object + type: array + url: + description: Invoke URL for the Yandex Cloud Serverless Container + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
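Taken together, the spec fields of this CRD admit a fairly small minimal manifest. A sketch, with hypothetical names and image URL (the API group is assumed, as above):

    apiVersion: serverless.yandex-cloud.upjet.crossplane.io/v1alpha1  # assumed group
    kind: Container
    metadata:
      name: example-container
    spec:
      forProvider:
        name: example-container
        memory: 256                                  # megabytes, aligned to 128 per the schema
        image:
        - url: cr.yandex/example-registry/app:latest # hypothetical image URL
        serviceAccountIdRef:
          name: example-sa                           # hypothetical ServiceAccount managed resource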
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/smartcaptcha.yandex-cloud.upjet.crossplane.io_captchas.yaml b/package/crds/smartcaptcha.yandex-cloud.upjet.crossplane.io_captchas.yaml new file mode 100644 index 0000000..bffd09e --- /dev/null +++ b/package/crds/smartcaptcha.yandex-cloud.upjet.crossplane.io_captchas.yaml @@ -0,0 +1,1285 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: captchas.smartcaptcha.yandex-cloud.upjet.crossplane.io +spec: + group: smartcaptcha.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Captcha + listKind: CaptchaList + plural: captchas + singular: captcha + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Captcha is the Schema for the Captchas API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CaptchaSpec defines the desired state of Captcha + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedSites: + description: (List of String) + items: + type: string + type: array + challengeType: + description: (String) + type: string + cloudId: + description: (String) + type: string + complexity: + description: (String) + type: string + deletionProtection: + description: (Boolean) + type: boolean + folderId: + description: (String) + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
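folderIdRef and folderIdSelector form the usual upjet reference pair: the ref points at one named Folder managed resource, while the selector matches by labels, with the policy block controlling whether and when resolution happens. Two fragments with hypothetical names:

    # Direct reference, tolerating a missing Folder:
    forProvider:
      folderIdRef:
        name: example-folder
        policy:
          resolution: Optional   # reconcile proceeds even if the reference cannot be resolved
    # Label-based selection instead:
    forProvider:
      folderIdSelector:
        matchLabels:
          team: platform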
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: (String) + type: string + overrideVariant: + description: (Block List) (see below for nested schema) + items: + properties: + challengeType: + description: (String) + type: string + complexity: + description: (String) + type: string + description: + description: (String) + type: string + preCheckType: + description: (String) + type: string + uuid: + description: (String) + type: string + type: object + type: array + preCheckType: + description: (String) + type: string + securityRule: + description: (Block List) (see below for nested schema) + items: + properties: + condition: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + headers: + description: (Block List) (see below for nested schema) + items: + properties: + name: + description: (String) + type: string + value: + description: '(Block List, Min: 1, Max: 1) (see + below for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + host: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + hosts: + description: (Block List) (see below for nested + schema) + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + sourceIp: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + geoIpMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + locations: + description: (List of String) + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + locations: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + ipRanges: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + ipRanges: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + uri: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + path: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) 
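The generated '(Block List)' descriptions in securityRule give little feel for the shape of the data, so here is a sketch of one rule that matches requests against a hypothetical host and path prefix:

    securityRule:
    - name: challenge-login
      priority: 10
      condition:
      - host:
        - hosts:
          - exactMatch: example.com
        uri:
        - path:
          - prefixMatch: /login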
+ type: string + type: object + type: array + queries: + description: (Block List) (see below for nested + schema) + items: + properties: + key: + description: (String) + type: string + value: + description: '(Block List, Min: 1, Max: + 1) (see below for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + description: + description: (String) + type: string + name: + description: (String) + type: string + overrideVariantUuid: + description: (String) + type: string + priority: + description: (Number) + type: number + type: object + type: array + styleJson: + description: (String) + type: string + turnOffHostnameCheck: + description: (Boolean) + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowedSites: + description: (List of String) + items: + type: string + type: array + challengeType: + description: (String) + type: string + cloudId: + description: (String) + type: string + complexity: + description: (String) + type: string + deletionProtection: + description: (Boolean) + type: boolean + folderId: + description: (String) + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
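As the initProvider description above notes, values set there are merged into forProvider at creation time and then placed on Terraform's ignore_changes hook, so later drift in those fields is not corrected. A sketch in which an initial rule is seeded and afterwards left to some external controller (names hypothetical):

    spec:
      forProvider:
        name: example-captcha
      initProvider:
        securityRule:            # applied on create, then ignored on subsequent reconciles
        - name: bootstrap-rule
          priority: 1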
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: (String) + type: string + overrideVariant: + description: (Block List) (see below for nested schema) + items: + properties: + challengeType: + description: (String) + type: string + complexity: + description: (String) + type: string + description: + description: (String) + type: string + preCheckType: + description: (String) + type: string + uuid: + description: (String) + type: string + type: object + type: array + preCheckType: + description: (String) + type: string + securityRule: + description: (Block List) (see below for nested schema) + items: + properties: + condition: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + headers: + description: (Block List) (see below for nested schema) + items: + properties: + name: + description: (String) + type: string + value: + description: '(Block List, Min: 1, Max: 1) (see + below for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + host: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + hosts: + description: (Block List) (see below for nested + schema) + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + sourceIp: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + geoIpMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + locations: + description: (List of String) + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + locations: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + ipRanges: + 
description: (List of String) + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + ipRanges: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + uri: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + path: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + queries: + description: (Block List) (see below for nested + schema) + items: + properties: + key: + description: (String) + type: string + value: + description: '(Block List, Min: 1, Max: + 1) (see below for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + description: + description: (String) + type: string + name: + description: (String) + type: string + overrideVariantUuid: + description: (String) + type: string + priority: + description: (Number) + type: number + type: object + type: array + styleJson: + description: (String) + type: string + turnOffHostnameCheck: + description: (Boolean) + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
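providerConfigRef falls back to a ProviderConfig named default; pointing a resource at a different set of credentials is just a matter of naming another ProviderConfig (hypothetical name below):

    spec:
      providerConfigRef:
        name: staging-account   # hypothetical ProviderConfig object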
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CaptchaStatus defines the observed state of Captcha. + properties: + atProvider: + properties: + allowedSites: + description: (List of String) + items: + type: string + type: array + challengeType: + description: (String) + type: string + clientKey: + description: (String) + type: string + cloudId: + description: (String) + type: string + complexity: + description: (String) + type: string + createdAt: + description: (String) + type: string + deletionProtection: + description: (Boolean) + type: boolean + folderId: + description: (String) + type: string + id: + description: (String) The ID of this resource. + type: string + name: + description: (String) + type: string + overrideVariant: + description: (Block List) (see below for nested schema) + items: + properties: + challengeType: + description: (String) + type: string + complexity: + description: (String) + type: string + description: + description: (String) + type: string + preCheckType: + description: (String) + type: string + uuid: + description: (String) + type: string + type: object + type: array + preCheckType: + description: (String) + type: string + securityRule: + description: (Block List) (see below for nested schema) + items: + properties: + condition: + description: '(Block List, Max: 1) (see below for nested + schema)' + items: + properties: + headers: + description: (Block List) (see below for nested schema) + items: + properties: + name: + description: (String) + type: string + value: + description: '(Block List, Min: 1, Max: 1) (see + below for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + host: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + hosts: + description: (Block List) (see below for nested + schema) + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + sourceIp: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + geoIpMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + locations: + description: (List of String) + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + locations: + description: (List of String) + items: + type: string + 
type: array + type: object + type: array + ipRangesMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + ipRanges: + description: (List of String) + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + ipRanges: + description: (List of String) + items: + type: string + type: array + type: object + type: array + type: object + type: array + uri: + description: '(Block List, Max: 1) (see below for + nested schema)' + items: + properties: + path: + description: '(Block List, Max: 1) (see below + for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + queries: + description: (Block List) (see below for nested + schema) + items: + properties: + key: + description: (String) + type: string + value: + description: '(Block List, Min: 1, Max: + 1) (see below for nested schema)' + items: + properties: + exactMatch: + description: (String) + type: string + exactNotMatch: + description: (String) + type: string + pireRegexMatch: + description: (String) + type: string + pireRegexNotMatch: + description: (String) + type: string + prefixMatch: + description: (String) + type: string + prefixNotMatch: + description: (String) + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + description: + description: (String) + type: string + name: + description: (String) + type: string + overrideVariantUuid: + description: (String) + type: string + priority: + description: (Number) + type: number + type: object + type: array + styleJson: + description: (String) + type: string + suspend: + description: (Boolean) + type: boolean + turnOffHostnameCheck: + description: (Boolean) + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
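Since this schema declares no CEL-validated required parameters, a minimal Captcha manifest needs little beyond a name and a folder; the folder ID below is hypothetical, and omitted strings such as complexity fall back to provider defaults:

    apiVersion: smartcaptcha.yandex-cloud.upjet.crossplane.io/v1alpha1
    kind: Captcha
    metadata:
      name: example-captcha
    spec:
      forProvider:
        name: example-captcha
        folderId: b1gexamplefolderid0000   # hypothetical folder ID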
+                  type: string
+              required:
+              - lastTransitionTime
+              - reason
+              - status
+              - type
+              type: object
+            type: array
+            x-kubernetes-list-map-keys:
+            - type
+            x-kubernetes-list-type: map
+          observedGeneration:
+            description: |-
+              ObservedGeneration is the latest metadata.generation
+              which resulted in either a ready state, or stalled due to error
+              it can not recover from without human intervention.
+            format: int64
+            type: integer
+        type: object
+      required:
+      - spec
+      type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/storage.yandex-cloud.upjet.crossplane.io_buckets.yaml b/package/crds/storage.yandex-cloud.upjet.crossplane.io_buckets.yaml
new file mode 100644
index 0000000..5d6f947
--- /dev/null
+++ b/package/crds/storage.yandex-cloud.upjet.crossplane.io_buckets.yaml
@@ -0,0 +1,2176 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: buckets.storage.yandex-cloud.upjet.crossplane.io
+spec:
+  group: storage.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: Bucket
+    listKind: BucketList
+    plural: buckets
+    singular: bucket
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: Bucket is the Schema for the Buckets API. Allows management of
+          a Yandex.Cloud Storage Bucket.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: BucketSpec defines the desired state of Bucket
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  accessKey:
+                    description: The access key to use when applying changes. If
+                      omitted, the storage_access_key specified in the provider config
+                      (explicitly or within shared_credentials_file) is used.
+                    type: string
+                  accessKeyRef:
+                    description: Reference to a ServiceAccountStaticAccessKey in
+                      iam to populate accessKey.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  accessKeySelector:
+                    description: Selector for a ServiceAccountStaticAccessKey in
+                      iam to populate accessKey.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching
+                          labels is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    type: object
+                  acl:
+                    description: The predefined ACL to apply. Defaults to private.
+                      Conflicts with grant.
+                    type: string
+                  anonymousAccessFlags:
+                    description: Provides various access to objects. See bucket availability
+                      for more information.
+                    items:
+                      properties:
+                        configRead:
+                          description: Allows to read bucket configuration anonymously.
+                          type: boolean
+                        list:
+                          description: Allows to list object in bucket anonymously.
+                          type: boolean
+                        read:
+                          description: Allows to read objects in bucket anonymously.
+                          type: boolean
+                      type: object
+                    type: array
+                  bucket:
+                    description: The name of the bucket.
+                    type: string
+                  bucketPrefix:
+                    description: Creates a unique bucket name beginning with the specified
+                      prefix. Conflicts with bucket.
+                    type: string
+                  corsRule:
+                    description: A rule of Cross-Origin Resource Sharing (documented
+                      below).
+                    items:
+                      properties:
+                        allowedHeaders:
+                          description: Specifies which headers are allowed.
+                          items:
+                            type: string
+                          type: array
+                        allowedMethods:
+                          description: Specifies which methods are allowed. Can be
+                            GET, PUT, POST, DELETE or HEAD.
+                          items:
+                            type: string
+                          type: array
+                        allowedOrigins:
+                          description: Specifies which origins are allowed.
+                          items:
+                            type: string
+                          type: array
+                        exposeHeaders:
+                          description: Specifies expose header in the response.
+                          items:
+                            type: string
+                          type: array
+                        maxAgeSeconds:
+                          description: Specifies time in seconds that browser can
+                            cache the response for a preflight request.
+                          type: number
+                      type: object
+                    type: array
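A corsRule entry following the block just described; the origin is hypothetical, and allowedMethods must be drawn from GET, PUT, POST, DELETE, HEAD:

    corsRule:
    - allowedOrigins:
      - https://app.example.com
      allowedMethods:
      - GET
      - PUT
      allowedHeaders:
      - '*'
      exposeHeaders:
      - ETag
      maxAgeSeconds: 3600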
+                  defaultStorageClass:
+                    description: 'Storage class which is used for storing objects
+                      by default. Available values are: "STANDARD", "COLD", "ICE".
+                      Default is "STANDARD". See storage class for more information.'
+                    type: string
+                  folderId:
+                    description: Allows creating the bucket in a different folder.
+                    type: string
+                  folderIdRef:
+                    description: Reference to a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  folderIdSelector:
+                    description: Selector for a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching
+                          labels is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    type: object
+                  forceDestroy:
+                    description: A boolean that indicates all objects should be deleted
+                      from the bucket so that the bucket can be destroyed without
+                      error. These objects are not recoverable.
+                    type: boolean
+                  grant:
+                    description: An ACL policy grant. Conflicts with acl.
+                    items:
+                      properties:
+                        id:
+                          description: Canonical user id to grant for. Used only when
+                            type is CanonicalUser.
+                          type: string
+                        permissions:
+                          description: List of permissions to apply for grantee. Valid
+                            values are READ, WRITE, FULL_CONTROL.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: set
+                        type:
+                          description: Type of grantee to apply for. Valid values
+                            are CanonicalUser and Group.
+                          type: string
+                        uri:
+                          description: Uri address to grant for. Used only when type
+                            is Group.
+ type: string
+ type: object
+ type: array
+ https:
+ description: Manages HTTPS certificates for the bucket. See https
+ for more information.
+ items:
+ properties:
+ certificateId:
+ description: ID of the certificate in Certificate Manager
+ that will be used for the bucket.
+ type: string
+ type: object
+ type: array
+ lifecycleRule:
+ description: A configuration of object lifecycle management (documented
+ below).
+ items:
+ properties:
+ abortIncompleteMultipartUploadDays:
+ description: Specifies the number of days after initiating
+ a multipart upload when the multipart upload must be completed.
+ type: number
+ enabled:
+ description: Specifies lifecycle rule status.
+ type: boolean
+ expiration:
+ description: Specifies a period in the object's expiration (documented
+ below).
+ items:
+ properties:
+ date:
+ description: Specifies the date after which you want
+ the corresponding action to take effect.
+ type: string
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ expiredObjectDeleteMarker:
+ description: On a versioned bucket (versioning-enabled
+ or versioning-suspended bucket), you can add this
+ element in the lifecycle configuration to direct
+ Object Storage to delete expired object delete markers.
+ type: boolean
+ type: object
+ type: array
+ filter:
+ description: 'Filter block identifies one or more objects
+ to which the rule applies. A Filter must have exactly
+ one of Prefix, Tag, or And specified. The filter supports
+ the following options:'
+ items:
+ properties:
+ and:
+ description: 'Operator applied to one or more filter
+ parameters. It should be used when two or more of
+ the above parameters are used. It supports the following
+ parameters:'
+ items:
+ properties:
+ objectSizeGreaterThan:
+ type: number
+ objectSizeLessThan:
+ type: number
+ prefix:
+ description: Object key prefix identifying one
+ or more objects to which the rule applies.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-map-type: granular
+ type: object
+ type: array
+ objectSizeGreaterThan:
+ type: number
+ objectSizeLessThan:
+ type: number
+ prefix:
+ description: Object key prefix identifying one or
+ more objects to which the rule applies.
+ type: string
+ tag:
+ items:
+ properties:
+ key:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ id:
+ description: Unique identifier for the rule. Must be less
+ than or equal to 255 characters in length.
+ type: string
+ noncurrentVersionExpiration:
+ description: Specifies when noncurrent object versions expire
+ (documented below).
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ type: object
+ type: array
+ noncurrentVersionTransition:
+ description: Specifies when noncurrent object versions transition
+ (documented below).
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ storageClass:
+ description: 'Specifies the storage class to which
+ you want the object to transition. Supported values:
+ [STANDARD_IA, COLD, ICE].'
+ type: string
+ type: object
+ type: array
+ prefix:
+ description: Object key prefix identifying one or more objects
+ to which the rule applies.
+ type: string
+ transition:
+ description: Specifies a period in the object's transitions
+ (documented below).
+ items:
+ properties:
+ date:
+ description: Specifies the date after which you want
+ the corresponding action to take effect.
+ type: string
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ storageClass:
+ description: 'Specifies the storage class to which
+ you want the object to transition. Supported values:
+ [STANDARD_IA, COLD, ICE].'
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ logging:
+ description: Settings for bucket logging (documented below).
+ items:
+ properties:
+ targetBucket:
+ description: The name of the bucket that will receive the
+ log objects.
+ type: string
+ targetBucketRef:
+ description: Reference to a Bucket in storage to populate
+ targetBucket.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ targetBucketSelector:
+ description: Selector for a Bucket in storage to populate
+ targetBucket.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching
+ labels is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ targetPrefix:
+ description: Specifies a key prefix for log objects.
+ type: string
+ type: object
+ type: array
+ maxSize:
+ description: The size of bucket, in bytes. See size limiting for
+ more information.
+ type: number
+ objectLockConfiguration:
+ description: A configuration of object lock management (documented
+ below).
+ items:
+ properties:
+ objectLockEnabled:
+ description: Enable object locking in a bucket. Requires
+ versioning to be enabled.
+ type: string
+ rule:
+ description: Specifies a default locking configuration for
+ added objects. Requires object_lock_enabled to be enabled.
+ items:
+ properties:
+ defaultRetention:
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in
+ days after uploading an object version. It
+ must be a positive integer. You can't set
+ it simultaneously with years.
+ type: number
+ mode:
+ description: Specifies a type of object lock.
+ One of ["GOVERNANCE", "COMPLIANCE"].
+ type: string
+ years:
+ description: Specifies a retention period in
+ years after uploading an object version. It
+ must be a positive integer. You can't set
+ it simultaneously with days.
+ type: number
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ policy:
+ type: string
+ secretKeySecretRef:
+ description: The secret key to use when applying changes. If omitted,
+ the storage_secret_key specified in the provider config (explicitly
+ or within shared_credentials_file) is used.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - key
+ - name
+ - namespace
+ type: object
+ serverSideEncryptionConfiguration:
+ description: A configuration of server-side encryption for the
+ bucket (documented below).
+ items:
+ properties:
+ rule:
+ description: Specifies the default server-side encryption
+ configuration for added objects.
+ items:
+ properties:
+ applyServerSideEncryptionByDefault:
+ description: A single object for setting server-side
+ encryption by default (documented below).
+ items:
+ properties:
+ kmsMasterKeyId:
+ description: The KMS master key ID used for
+ the SSE-KMS encryption.
+ type: string
+ kmsMasterKeyIdRef:
+ description: Reference to a SymmetricKey in
+ kms to populate kmsMasterKeyId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ kmsMasterKeyIdSelector:
+ description: Selector for a SymmetricKey in
+ kms to populate kmsMasterKeyId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object
+ with matching labels is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ sseAlgorithm:
+ description: The server-side encryption algorithm
+ to use. The single valid value is aws:kms.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-map-type: granular
+ versioning:
+ description: The state of versioning (documented below).
+ items:
+ properties:
+ enabled:
+ description: Enable versioning. Once you version-enable
+ a bucket, it can never return to an unversioned state.
+ You can, however, suspend versioning on that bucket.
+ type: boolean
+ type: object
+ type: array
+ website:
+ description: A website object (documented below).
+ items:
+ properties:
+ errorDocument:
+ description: An absolute path to the document to return
+ in case of a 4XX error.
+ type: string
+ indexDocument:
+ description: Storage returns this index document when requests
+ are made to the root domain or any of the subfolders.
+ type: string
+ redirectAllRequestsTo:
+ description: A hostname to redirect all website requests
+ for this bucket to. Hostname can optionally be prefixed
+ with a protocol (http:// or https://) to use when redirecting
+ requests. The default is the protocol that is used in
+ the original request.
+ type: string
+ routingRules:
+ description: A JSON array containing routing rules describing
+ redirect behavior and when redirects are applied.
+ type: string
+ type: object
+ type: array
+ websiteDomain:
+ description: The domain of the website endpoint, if the bucket
+ is configured with a website. If not, this will be an empty
+ string.
+ type: string
+ websiteEndpoint:
+ description: The website endpoint, if the bucket is configured
+ with a website. If not, this will be an empty string.
+ type: string
+ type: object
+ initProvider:
+ description: |-
+ THIS IS A BETA FIELD. It will be honored
+ unless the Management Policies feature flag is disabled.
+ InitProvider holds the same fields as ForProvider, with the exception
+ of Identifier and other resource reference fields. The fields that are
+ in InitProvider are merged into ForProvider when the resource is created.
+ The same fields are also added to the terraform ignore_changes hook, to
+ avoid updating them after creation. This is useful for fields that are
+ required on creation, but we do not desire to update them after creation,
+ for example because an external controller is managing them, like an
+ autoscaler.
+ properties:
+ accessKey:
+ description: The access key to use when applying changes. If omitted,
+ the storage_access_key specified in the provider config (explicitly
+ or within shared_credentials_file) is used.
+ type: string
+ accessKeyRef:
+ description: Reference to a ServiceAccountStaticAccessKey in iam
+ to populate accessKey.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ accessKeySelector:
+ description: Selector for a ServiceAccountStaticAccessKey in iam
+ to populate accessKey.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ acl:
+ description: The predefined ACL to apply. Defaults to private.
+ Conflicts with grant.
+ type: string
+ anonymousAccessFlags:
+ description: Provides various access to objects. See bucket availability
+ for more information.
+ items:
+ properties:
+ configRead:
+ description: Allows anonymous read access to the bucket configuration.
+ type: boolean
+ list:
+ description: Allows anonymous listing of objects in the bucket.
+ type: boolean
+ read:
+ description: Allows anonymous read access to objects in the bucket.
+ type: boolean
+ type: object
+ type: array
+ bucket:
+ description: The name of the bucket.
+ type: string
+ bucketPrefix:
+ description: Creates a unique bucket name beginning with the specified
+ prefix. Conflicts with bucket.
+ type: string
+ corsRule:
+ description: A rule of Cross-Origin Resource Sharing (documented
+ below).
+ items:
+ properties:
+ allowedHeaders:
+ description: Specifies which headers are allowed.
+ items:
+ type: string
+ type: array
+ allowedMethods:
+ description: Specifies which methods are allowed. Can be
+ GET, PUT, POST, DELETE or HEAD.
+ items:
+ type: string
+ type: array
+ allowedOrigins:
+ description: Specifies which origins are allowed.
+ items:
+ type: string
+ type: array
+ exposeHeaders:
+ description: Specifies the headers to expose in the response.
+ items:
+ type: string
+ type: array
+ maxAgeSeconds:
+ description: Specifies the time in seconds that the browser can
+ cache the response for a preflight request.
+ type: number
+ type: object
+ type: array
+ defaultStorageClass:
+ description: 'Storage class which is used for storing objects
+ by default. Available values are: "STANDARD", "COLD", "ICE".
+ Default is "STANDARD". See storage class for more information.'
+ type: string
+ folderId:
+ description: Allows creating the bucket in a different folder.
+ type: string
+ folderIdRef:
+ description: Reference to a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ folderIdSelector:
+ description: Selector for a Folder in resourcemanager to populate
+ folderId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching labels
+ is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ forceDestroy:
+ description: A boolean that indicates all objects should be deleted
+ from the bucket so that the bucket can be destroyed without
+ error. These objects are not recoverable.
+ type: boolean
+ grant:
+ description: An ACL policy grant. Conflicts with acl.
+ items:
+ properties:
+ id:
+ description: Canonical user ID to grant for. Used only when
+ type is CanonicalUser.
+ type: string
+ permissions:
+ description: List of permissions to apply for grantee. Valid
+ values are READ, WRITE, FULL_CONTROL.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type:
+ description: Type of grantee to apply for. Valid values
+ are CanonicalUser and Group.
+ type: string
+ uri:
+ description: URI address to grant for. Used only when type
+ is Group.
+ type: string
+ type: object
+ type: array
+ https:
+ description: Manages HTTPS certificates for the bucket. See https
+ for more information.
+ items:
+ properties:
+ certificateId:
+ description: ID of the certificate in Certificate Manager
+ that will be used for the bucket.
+ type: string
+ type: object
+ type: array
+ lifecycleRule:
+ description: A configuration of object lifecycle management (documented
+ below).
+ items:
+ properties:
+ abortIncompleteMultipartUploadDays:
+ description: Specifies the number of days after initiating
+ a multipart upload when the multipart upload must be completed.
+ type: number
+ enabled:
+ description: Specifies lifecycle rule status.
+ type: boolean
+ expiration:
+ description: Specifies a period in the object's expiration (documented
+ below).
+ items:
+ properties:
+ date:
+ description: Specifies the date after which you want
+ the corresponding action to take effect.
+ type: string
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ expiredObjectDeleteMarker:
+ description: On a versioned bucket (versioning-enabled
+ or versioning-suspended bucket), you can add this
+ element in the lifecycle configuration to direct
+ Object Storage to delete expired object delete markers.
+ type: boolean
+ type: object
+ type: array
+ filter:
+ description: 'Filter block identifies one or more objects
+ to which the rule applies. A Filter must have exactly
+ one of Prefix, Tag, or And specified. The filter supports
+ the following options:'
+ items:
+ properties:
+ and:
+ description: 'Operator applied to one or more filter
+ parameters. It should be used when two or more of
+ the above parameters are used. It supports the following
+ parameters:'
+ items:
+ properties:
+ objectSizeGreaterThan:
+ type: number
+ objectSizeLessThan:
+ type: number
+ prefix:
+ description: Object key prefix identifying one
+ or more objects to which the rule applies.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-map-type: granular
+ type: object
+ type: array
+ objectSizeGreaterThan:
+ type: number
+ objectSizeLessThan:
+ type: number
+ prefix:
+ description: Object key prefix identifying one or
+ more objects to which the rule applies.
+ type: string
+ tag:
+ items:
+ properties:
+ key:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ id:
+ description: Unique identifier for the rule. Must be less
+ than or equal to 255 characters in length.
+ type: string
+ noncurrentVersionExpiration:
+ description: Specifies when noncurrent object versions expire
+ (documented below).
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ type: object
+ type: array
+ noncurrentVersionTransition:
+ description: Specifies when noncurrent object versions transition
+ (documented below).
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ storageClass:
+ description: 'Specifies the storage class to which
+ you want the object to transition. Supported values:
+ [STANDARD_IA, COLD, ICE].'
+ type: string
+ type: object
+ type: array
+ prefix:
+ description: Object key prefix identifying one or more objects
+ to which the rule applies.
+ type: string
+ transition:
+ description: Specifies a period in the object's transitions
+ (documented below).
+ items:
+ properties:
+ date:
+ description: Specifies the date after which you want
+ the corresponding action to take effect.
+ type: string
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ storageClass:
+ description: 'Specifies the storage class to which
+ you want the object to transition. Supported values:
+ [STANDARD_IA, COLD, ICE].'
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ logging:
+ description: Settings for bucket logging (documented below).
+ items:
+ properties:
+ targetBucket:
+ description: The name of the bucket that will receive the
+ log objects.
+ type: string
+ targetBucketRef:
+ description: Reference to a Bucket in storage to populate
+ targetBucket.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ targetBucketSelector:
+ description: Selector for a Bucket in storage to populate
+ targetBucket.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object with matching
+ labels is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ targetPrefix:
+ description: Specifies a key prefix for log objects.
+ type: string
+ type: object
+ type: array
+ maxSize:
+ description: The size of bucket, in bytes. See size limiting for
+ more information.
+ type: number
+ objectLockConfiguration:
+ description: A configuration of object lock management (documented
+ below).
+ items:
+ properties:
+ objectLockEnabled:
+ description: Enable object locking in a bucket. Requires
+ versioning to be enabled.
+ type: string
+ rule:
+ description: Specifies a default locking configuration for
+ added objects. Requires object_lock_enabled to be enabled.
+ items:
+ properties:
+ defaultRetention:
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in
+ days after uploading an object version. It
+ must be a positive integer. You can't set
+ it simultaneously with years.
+ type: number
+ mode:
+ description: Specifies a type of object lock.
+ One of ["GOVERNANCE", "COMPLIANCE"].
+ type: string
+ years:
+ description: Specifies a retention period in
+ years after uploading an object version. It
+ must be a positive integer. You can't set
+ it simultaneously with days.
+ type: number
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ policy:
+ type: string
+ secretKeySecretRef:
+ description: The secret key to use when applying changes. If omitted,
+ the storage_secret_key specified in the provider config (explicitly
+ or within shared_credentials_file) is used.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - key
+ - name
+ - namespace
+ type: object
+ serverSideEncryptionConfiguration:
+ description: A configuration of server-side encryption for the
+ bucket (documented below).
+ items:
+ properties:
+ rule:
+ description: Specifies the default server-side encryption
+ configuration for added objects.
+ items:
+ properties:
+ applyServerSideEncryptionByDefault:
+ description: A single object for setting server-side
+ encryption by default (documented below).
+ items:
+ properties:
+ kmsMasterKeyId:
+ description: The KMS master key ID used for
+ the SSE-KMS encryption.
+ type: string
+ kmsMasterKeyIdRef:
+ description: Reference to a SymmetricKey in
+ kms to populate kmsMasterKeyId.
+ properties:
+ name:
+ description: Name of the referenced object.
+ type: string
+ policy:
+ description: Policies for referencing.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ kmsMasterKeyIdSelector:
+ description: Selector for a SymmetricKey in
+ kms to populate kmsMasterKeyId.
+ properties:
+ matchControllerRef:
+ description: |-
+ MatchControllerRef ensures an object with the same controller reference
+ as the selecting object is selected.
+ type: boolean
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: MatchLabels ensures an object
+ with matching labels is selected.
+ type: object
+ policy:
+ description: Policies for selection.
+ properties:
+ resolution:
+ default: Required
+ description: |-
+ Resolution specifies whether resolution of this reference is required.
+ The default is 'Required', which means the reconcile will fail if the
+ reference cannot be resolved. 'Optional' means this reference will be
+ a no-op if it cannot be resolved.
+ enum:
+ - Required
+ - Optional
+ type: string
+ resolve:
+ description: |-
+ Resolve specifies when this reference should be resolved. The default
+ is 'IfNotPresent', which will attempt to resolve the reference only when
+ the corresponding field is not present. Use 'Always' to resolve the
+ reference on every reconcile.
+ enum:
+ - Always
+ - IfNotPresent
+ type: string
+ type: object
+ type: object
+ sseAlgorithm:
+ description: The server-side encryption algorithm
+ to use. The single valid value is aws:kms.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-map-type: granular
+ versioning:
+ description: The state of versioning (documented below).
+ items:
+ properties:
+ enabled:
+ description: Enable versioning. Once you version-enable
+ a bucket, it can never return to an unversioned state.
+ You can, however, suspend versioning on that bucket.
+ type: boolean
+ type: object
+ type: array
+ website:
+ description: A website object (documented below).
+ items:
+ properties:
+ errorDocument:
+ description: An absolute path to the document to return
+ in case of a 4XX error.
+ type: string
+ indexDocument:
+ description: Storage returns this index document when requests
+ are made to the root domain or any of the subfolders.
+ type: string
+ redirectAllRequestsTo:
+ description: A hostname to redirect all website requests
+ for this bucket to. Hostname can optionally be prefixed
+ with a protocol (http:// or https://) to use when redirecting
+ requests. The default is the protocol that is used in
+ the original request.
+ type: string
+ routingRules:
+ description: A JSON array containing routing rules describing
+ redirect behavior and when redirects are applied.
+ type: string
+ type: object
+ type: array
+ websiteDomain:
+ description: The domain of the website endpoint, if the bucket
+ is configured with a website. If not, this will be an empty
+ string.
+ type: string
+ websiteEndpoint:
+ description: The website endpoint, if the bucket is configured
+ with a website. If not, this will be an empty string.
+ type: string
+ type: object
+ managementPolicies:
+ default:
+ - '*'
+ description: |-
+ THIS IS A BETA FIELD. It is on by default but can be opted out
+ through a Crossplane feature flag.
+ ManagementPolicies specify the array of actions Crossplane is allowed to
+ take on the managed and external resources.
+ This field is planned to replace the DeletionPolicy field in a future
+ release. Currently, both could be set independently and non-default
+ values would be honored if the feature flag is enabled. If both are
+ custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Labels are the labels/tags to be added to connection secret.
+ - For Kubernetes secrets, this will be used as "metadata.labels".
+ - It is up to Secret Store implementation for other store types.
+ type: object
+ type:
+ description: |-
+ Type is the SecretType for the connection secret.
+ - Only valid for Kubernetes Secret Stores.
+ type: string
+ type: object
+ name:
+ description: Name is the name of the connection secret.
+ type: string
+ required:
+ - name
+ type: object
+ writeConnectionSecretToRef:
+ description: |-
+ WriteConnectionSecretToReference specifies the namespace and name of a
+ Secret to which any connection details for this managed resource should
+ be written. Connection details frequently include the endpoint, username,
+ and password required to connect to the managed resource.
+ This field is planned to be replaced in a future release in favor of
+ PublishConnectionDetailsTo. Currently, both could be set independently
+ and connection details would be published to both without affecting
+ each other.
+ properties:
+ name:
+ description: Name of the secret.
+ type: string
+ namespace:
+ description: Namespace of the secret.
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ required:
+ - forProvider
+ type: object
+ status:
+ description: BucketStatus defines the observed state of Bucket.
+ properties:
+ atProvider:
+ properties:
+ accessKey:
+ description: The access key to use when applying changes. If omitted,
+ the storage_access_key specified in the provider config (explicitly
+ or within shared_credentials_file) is used.
+ type: string
+ acl:
+ description: The predefined ACL to apply. Defaults to private.
+ Conflicts with grant.
+ type: string
+ anonymousAccessFlags:
+ description: Provides various access to objects. See bucket availability
+ for more information.
+ items:
+ properties:
+ configRead:
+ description: Allows anonymous read access to the bucket configuration.
+ type: boolean
+ list:
+ description: Allows anonymous listing of objects in the bucket.
+ type: boolean
+ read:
+ description: Allows anonymous read access to objects in the bucket.
+ type: boolean
+ type: object
+ type: array
+ bucket:
+ description: The name of the bucket.
+ type: string
+ bucketDomainName:
+ description: The bucket domain name.
+ type: string
+ bucketPrefix:
+ description: Creates a unique bucket name beginning with the specified
+ prefix. Conflicts with bucket.
+ type: string
+ corsRule:
+ description: A rule of Cross-Origin Resource Sharing (documented
+ below).
+ items:
+ properties:
+ allowedHeaders:
+ description: Specifies which headers are allowed.
+ items:
+ type: string
+ type: array
+ allowedMethods:
+ description: Specifies which methods are allowed. Can be
+ GET, PUT, POST, DELETE or HEAD.
+ items:
+ type: string
+ type: array
+ allowedOrigins:
+ description: Specifies which origins are allowed.
+ items:
+ type: string
+ type: array
+ exposeHeaders:
+ description: Specifies the headers to expose in the response.
+ items:
+ type: string
+ type: array
+ maxAgeSeconds:
+ description: Specifies the time in seconds that the browser can
+ cache the response for a preflight request.
+ type: number
+ type: object
+ type: array
+ defaultStorageClass:
+ description: 'Storage class which is used for storing objects
+ by default. Available values are: "STANDARD", "COLD", "ICE".
+ Default is "STANDARD". See storage class for more information.'
+ type: string
+ folderId:
+ description: Allows creating the bucket in a different folder.
+ type: string
+ forceDestroy:
+ description: A boolean that indicates all objects should be deleted
+ from the bucket so that the bucket can be destroyed without
+ error. These objects are not recoverable.
+ type: boolean
+ grant:
+ description: An ACL policy grant. Conflicts with acl.
+ items:
+ properties:
+ id:
+ description: Canonical user ID to grant for. Used only when
+ type is CanonicalUser.
+ type: string
+ permissions:
+ description: List of permissions to apply for grantee. Valid
+ values are READ, WRITE, FULL_CONTROL.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type:
+ description: Type of grantee to apply for. Valid values
+ are CanonicalUser and Group.
+ type: string
+ uri:
+ description: URI address to grant for. Used only when type
+ is Group.
+ type: string
+ type: object
+ type: array
+ https:
+ description: Manages HTTPS certificates for the bucket. See https
+ for more information.
+ items:
+ properties:
+ certificateId:
+ description: ID of the certificate in Certificate Manager
+ that will be used for the bucket.
+ type: string
+ type: object
+ type: array
+ id:
+ description: Canonical user id to grant for. Used only when type
+ is CanonicalUser.
+ type: string
+ lifecycleRule:
+ description: A configuration of object lifecycle management (documented
+ below).
+ items:
+ properties:
+ abortIncompleteMultipartUploadDays:
+ description: Specifies the number of days after initiating
+ a multipart upload when the multipart upload must be completed.
+ type: number
+ enabled:
+ description: Specifies lifecycle rule status.
+ type: boolean
+ expiration:
+ description: Specifies a period in the object's expiration (documented
+ below).
+ items:
+ properties:
+ date:
+ description: Specifies the date after which you want
+ the corresponding action to take effect.
+ type: string
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ expiredObjectDeleteMarker:
+ description: On a versioned bucket (versioning-enabled
+ or versioning-suspended bucket), you can add this
+ element in the lifecycle configuration to direct
+ Object Storage to delete expired object delete markers.
+ type: boolean
+ type: object
+ type: array
+ filter:
+ description: 'Filter block identifies one or more objects
+ to which the rule applies. A Filter must have exactly
+ one of Prefix, Tag, or And specified. The filter supports
+ the following options:'
+ items:
+ properties:
+ and:
+ description: 'Operator applied to one or more filter
+ parameters. It should be used when two or more of
+ the above parameters are used. It supports the following
+ parameters:'
+ items:
+ properties:
+ objectSizeGreaterThan:
+ type: number
+ objectSizeLessThan:
+ type: number
+ prefix:
+ description: Object key prefix identifying one
+ or more objects to which the rule applies.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-map-type: granular
+ type: object
+ type: array
+ objectSizeGreaterThan:
+ type: number
+ objectSizeLessThan:
+ type: number
+ prefix:
+ description: Object key prefix identifying one or
+ more objects to which the rule applies.
+ type: string
+ tag:
+ items:
+ properties:
+ key:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ id:
+ description: Unique identifier for the rule. Must be less
+ than or equal to 255 characters in length.
+ type: string
+ noncurrentVersionExpiration:
+ description: Specifies when noncurrent object versions expire
+ (documented below).
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ type: object
+ type: array
+ noncurrentVersionTransition:
+ description: Specifies when noncurrent object versions transition
+ (documented below).
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ storageClass:
+ description: 'Specifies the storage class to which
+ you want the object to transition. Supported values:
+ [STANDARD_IA, COLD, ICE].'
+ type: string
+ type: object
+ type: array
+ prefix:
+ description: Object key prefix identifying one or more objects
+ to which the rule applies.
+ type: string
+ transition:
+ description: Specifies a period in the object's transitions
+ (documented below).
+ items:
+ properties:
+ date:
+ description: Specifies the date after which you want
+ the corresponding action to take effect.
+ type: string
+ days:
+ description: Specifies a retention period in days
+ after uploading an object version. It must be a
+ positive integer. You can't set it simultaneously
+ with years.
+ type: number
+ storageClass:
+ description: 'Specifies the storage class to which
+ you want the object to transition. Supported values:
+ [STANDARD_IA, COLD, ICE].'
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ logging:
+ description: Settings for bucket logging (documented below).
+ items:
+ properties:
+ targetBucket:
+ description: The name of the bucket that will receive the
+ log objects.
+ type: string
+ targetPrefix:
+ description: Specifies a key prefix for log objects.
+ type: string
+ type: object
+ type: array
+ maxSize:
+ description: The size of bucket, in bytes. See size limiting for
+ more information.
+ type: number
+ objectLockConfiguration:
+ description: A configuration of object lock management (documented
+ below).
+ items:
+ properties:
+ objectLockEnabled:
+ description: Enable object locking in a bucket. Requires
+ versioning to be enabled.
+ type: string
+ rule:
+ description: Specifies a default locking configuration for
+ added objects. Requires object_lock_enabled to be enabled.
+ items:
+ properties:
+ defaultRetention:
+ items:
+ properties:
+ days:
+ description: Specifies a retention period in
+ days after uploading an object version. It
+ must be a positive integer. You can't set
+ it simultaneously with years.
+ type: number
+ mode:
+ description: Specifies a type of object lock.
+ One of ["GOVERNANCE", "COMPLIANCE"].
+ type: string
+ years:
+ description: Specifies a retention period in
+ years after uploading an object version. It
+ must be a positive integer. You can't set
+ it simultaneously with days.
+ type: number
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ policy:
+ type: string
+ serverSideEncryptionConfiguration:
+ description: A configuration of server-side encryption for the
+ bucket (documented below).
+ items:
+ properties:
+ rule:
+ description: Specifies the default server-side encryption
+ configuration for added objects.
+ items:
+ properties:
+ applyServerSideEncryptionByDefault:
+ description: A single object for setting server-side
+ encryption by default (documented below).
+ items:
+ properties:
+ kmsMasterKeyId:
+ description: The KMS master key ID used for
+ the SSE-KMS encryption.
+ type: string
+ sseAlgorithm:
+ description: The server-side encryption algorithm
+ to use. The single valid value is aws:kms.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-map-type: granular
+ versioning:
+ description: The state of versioning (documented below).
+ items:
+ properties:
+ enabled:
+ description: Enable versioning. Once you version-enable
+ a bucket, it can never return to an unversioned state.
+ You can, however, suspend versioning on that bucket.
+ type: boolean
+ type: object
+ type: array
+ website:
+ description: A website object (documented below).
+ items:
+ properties:
+ errorDocument:
+ description: An absolute path to the document to return
+ in case of a 4XX error.
+ type: string
+ indexDocument:
+ description: Storage returns this index document when requests
+ are made to the root domain or any of the subfolders.
+ type: string
+ redirectAllRequestsTo:
+ description: A hostname to redirect all website requests
+ for this bucket to. Hostname can optionally be prefixed
+ with a protocol (http:// or https://) to use when redirecting
+ requests. The default is the protocol that is used in
+ the original request.
+ type: string
+ routingRules:
+ description: A JSON array containing routing rules describing
+ redirect behavior and when redirects are applied.
+ type: string
+ type: object
+ type: array
+ websiteDomain:
+ description: The domain of the website endpoint, if the bucket
+ is configured with a website. If not, this will be an empty
+ string.
+ type: string
+ websiteEndpoint:
+ description: The website endpoint, if the bucket is configured
+ with a website. If not, this will be an empty string.
+ type: string
+ type: object
+ conditions:
+ description: Conditions of the resource.
+ items:
+ description: A Condition that may apply to a resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ LastTransitionTime is the last time this condition transitioned from one
+ status to another.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A Message containing details about this condition's last transition from
+ one status to another, if any.
+ type: string
+ observedGeneration:
+ description: |-
+ ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ type: integer
+ reason:
+ description: A Reason for this condition's last transition from
+ one status to another.
+ type: string
+ status:
+ description: Status of this condition; is it currently True,
+ False, or Unknown?
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/storage.yandex-cloud.upjet.crossplane.io_objects.yaml b/package/crds/storage.yandex-cloud.upjet.crossplane.io_objects.yaml new file mode 100644 index 0000000..b65706d --- /dev/null +++ b/package/crds/storage.yandex-cloud.upjet.crossplane.io_objects.yaml @@ -0,0 +1,858 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: objects.storage.yandex-cloud.upjet.crossplane.io +spec: + group: storage.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Object + listKind: ObjectList + plural: objects + singular: object + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Object is the Schema for the Objects API. Allows management of + a Yandex.Cloud Storage Object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectSpec defines the desired state of Object + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessKey: + description: The access key to use when applying changes. 
This + value can also be provided as storage_access_key specified in + provider config (explicitly or within shared_credentials_file) + is used. + type: string + accessKeyRef: + description: Reference to a ServiceAccountStaticAccessKey in iam + to populate accessKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessKeySelector: + description: Selector for a ServiceAccountStaticAccessKey in iam + to populate accessKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + acl: + description: The predefined ACL to apply. Defaults to private. + type: string + bucket: + description: The name of the containing bucket. + type: string + bucketRef: + description: Reference to a Bucket to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              bucketSelector:
+                description: Selector for a Bucket to populate bucket.
+                properties:
+                  matchControllerRef:
+                    description: |-
+                      MatchControllerRef ensures an object with the same controller reference
+                      as the selecting object is selected.
+                    type: boolean
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: MatchLabels ensures an object with matching labels
+                      is selected.
+                    type: object
+                  policy:
+                    description: Policies for selection.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                type: object
+              content:
+                description: Literal string value to use as the object content,
+                  which will be uploaded as UTF-8-encoded text.
+                type: string
+              contentBase64:
+                description: Base64-encoded data that will be decoded and uploaded
+                  as raw bytes for the object content. This allows safely uploading
+                  non-UTF8 binary data, but is recommended only for small content
+                  such as the result of the gzipbase64 function with small text
+                  strings. For larger objects, use source to stream the content
+                  from a disk file.
+                type: string
+              contentType:
+                description: A standard MIME type describing the format of the
+                  object data, e.g. application/octet-stream. All Valid MIME Types
+                  are valid for this input.
+                type: string
+              key:
+                description: The name of the object once it is in the bucket.
+                type: string
+              objectLockLegalHoldStatus:
+                description: Specifies a legal hold status of an object. Requires
+                  object_lock_configuration to be enabled on a bucket.
+                type: string
+              objectLockMode:
+                description: Specifies a type of object lock. One of ["GOVERNANCE",
+                  "COMPLIANCE"]. It must be set simultaneously with object_lock_retain_until_date.
+                  Requires object_lock_configuration to be enabled on a bucket.
+                type: string
+              objectLockRetainUntilDate:
+                description: Specifies date and time in RFC3339 format until which
+                  an object is to be locked. It must be set simultaneously with
+                  object_lock_mode. Requires object_lock_configuration to be enabled
+                  on a bucket.
+                type: string
+              secretKeySecretRef:
+                description: The secret key to use when applying changes. If omitted,
+                  the storage_secret_key specified in the provider config (explicitly
+                  or within shared_credentials_file) is used.
+                properties:
+                  key:
+                    description: The key to select.
+                    type: string
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - key
+                - name
+                - namespace
+                type: object
+              source:
+                description: The path to a file that will be read and uploaded
+                  as raw bytes for the object content.
+                type: string
+              sourceHash:
+                description: Used to trigger object update when the source content
+                  changes.
So the only meaningful value is filemd5("path/to/source")
+                  (The value is only stored in state and not saved by Yandex Storage).
+                type: string
+              tags:
+                additionalProperties:
+                  type: string
+                description: Specifies object tags.
+                type: object
+                x-kubernetes-map-type: granular
+            type: object
+          initProvider:
+            description: |-
+              THIS IS A BETA FIELD. It will be honored
+              unless the Management Policies feature flag is disabled.
+              InitProvider holds the same fields as ForProvider, with the exception
+              of Identifier and other resource reference fields. The fields that are
+              in InitProvider are merged into ForProvider when the resource is created.
+              The same fields are also added to the terraform ignore_changes hook, to
+              avoid updating them after creation. This is useful for fields that are
+              required on creation, but we do not desire to update them after creation,
+              for example because an external controller is managing them, like an
+              autoscaler.
+            properties:
+              accessKey:
+                description: The access key to use when applying changes. If omitted,
+                  the storage_access_key specified in the provider config (explicitly
+                  or within shared_credentials_file) is used.
+                type: string
+              accessKeyRef:
+                description: Reference to a ServiceAccountStaticAccessKey in iam
+                  to populate accessKey.
+                properties:
+                  name:
+                    description: Name of the referenced object.
+                    type: string
+                  policy:
+                    description: Policies for referencing.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              accessKeySelector:
+                description: Selector for a ServiceAccountStaticAccessKey in iam
+                  to populate accessKey.
+                properties:
+                  matchControllerRef:
+                    description: |-
+                      MatchControllerRef ensures an object with the same controller reference
+                      as the selecting object is selected.
+                    type: boolean
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: MatchLabels ensures an object with matching labels
+                      is selected.
+                    type: object
+                  policy:
+                    description: Policies for selection.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: |-
+                          Resolution specifies whether resolution of this reference is required.
+                          The default is 'Required', which means the reconcile will fail if the
+                          reference cannot be resolved. 'Optional' means this reference will be
+                          a no-op if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: |-
+                          Resolve specifies when this reference should be resolved. The default
+                          is 'IfNotPresent', which will attempt to resolve the reference only when
+                          the corresponding field is not present. Use 'Always' to resolve the
+                          reference on every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                type: object
+              acl:
+                description: The predefined ACL to apply. Defaults to private.
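+                # Editor's note (illustrative, not generated by upjet): per the
+                # initProvider description above, fields such as acl can be set
+                # only for creation and then left unmanaged, e.g.:
+                #
+                #   spec:
+                #     initProvider:
+                #       acl: public-read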
+ type: string + bucket: + description: The name of the containing bucket. + type: string + bucketRef: + description: Reference to a Bucket to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + content: + description: Literal string value to use as the object content, + which will be uploaded as UTF-8-encoded text. + type: string + contentBase64: + description: Base64-encoded data that will be decoded and uploaded + as raw bytes for the object content. This allows safely uploading + non-UTF8 binary data, but is recommended only for small content + such as the result of the gzipbase64 function with small text + strings. For larger objects, use source to stream the content + from a disk file. + type: string + contentType: + description: A standard MIME type describing the format of the + object data, e.g. application/octet-stream. All Valid MIME Types + are valid for this input. + type: string + key: + description: The name of the object once it is in the bucket. + type: string + objectLockLegalHoldStatus: + description: Specifies a legal hold status of an object. Requires + object_lock_configuration to be enabled on a bucket. + type: string + objectLockMode: + description: Specifies a type of object lock. One of ["GOVERNANCE", + "COMPLIANCE"]. It must be set simultaneously with object_lock_retain_until_date. + Requires object_lock_configuration to be enabled on a bucket. 
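+                # Illustrative sketch (example values, not defaults): the two
+                # object-lock fields documented above must be set together, e.g.:
+                #
+                #   objectLockMode: GOVERNANCE
+                #   objectLockRetainUntilDate: "2030-01-01T00:00:00Z"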
+                type: string
+              objectLockRetainUntilDate:
+                description: Specifies date and time in RFC3339 format until which
+                  an object is to be locked. It must be set simultaneously with
+                  object_lock_mode. Requires object_lock_configuration to be enabled
+                  on a bucket.
+                type: string
+              secretKeySecretRef:
+                description: The secret key to use when applying changes. If omitted,
+                  the storage_secret_key specified in the provider config (explicitly
+                  or within shared_credentials_file) is used.
+                properties:
+                  key:
+                    description: The key to select.
+                    type: string
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - key
+                - name
+                - namespace
+                type: object
+              source:
+                description: The path to a file that will be read and uploaded
+                  as raw bytes for the object content.
+                type: string
+              sourceHash:
+                description: Used to trigger object update when the source content
+                  changes. So the only meaningful value is filemd5("path/to/source")
+                  (The value is only stored in state and not saved by Yandex Storage).
+                type: string
+              tags:
+                additionalProperties:
+                  type: string
+                description: Specifies object tags.
+                type: object
+                x-kubernetes-map-type: granular
+            type: object
+          managementPolicies:
+            default:
+            - '*'
+            description: |-
+              THIS IS A BETA FIELD. It is on by default but can be opted out
+              through a Crossplane feature flag.
+              ManagementPolicies specify the array of actions Crossplane is allowed to
+              take on the managed and external resources.
+              This field is planned to replace the DeletionPolicy field in a future
+              release. Currently, both could be set independently and non-default
+              values would be honored if the feature flag is enabled. If both are
+              custom, the DeletionPolicy field will be ignored.
+              See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+              and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+            items:
+              description: |-
+                A ManagementAction represents an action that the Crossplane controllers
+                can take on an external resource.
+              enum:
+              - Observe
+              - Create
+              - Update
+              - Delete
+              - LateInitialize
+              - '*'
+              type: string
+            type: array
+          providerConfigRef:
+            default:
+              name: default
+            description: |-
+              ProviderConfigReference specifies how the provider that will be used to
+              create, observe, update, and delete this managed resource should be
+              configured.
+            properties:
+              name:
+                description: Name of the referenced object.
+                type: string
+              policy:
+                description: Policies for referencing.
+                properties:
+                  resolution:
+                    default: Required
+                    description: |-
+                      Resolution specifies whether resolution of this reference is required.
+                      The default is 'Required', which means the reconcile will fail if the
+                      reference cannot be resolved. 'Optional' means this reference will be
+                      a no-op if it cannot be resolved.
+                    enum:
+                    - Required
+                    - Optional
+                    type: string
+                  resolve:
+                    description: |-
+                      Resolve specifies when this reference should be resolved. The default
+                      is 'IfNotPresent', which will attempt to resolve the reference only when
+                      the corresponding field is not present. Use 'Always' to resolve the
+                      reference on every reconcile.
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
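+                # Illustrative (names are hypothetical): a typical reference to a
+                # connection-details Secret looks like:
+                #
+                #   writeConnectionSecretToRef:
+                #     name: object-conn
+                #     namespace: crossplane-system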
+                type: string
+              required:
+              - name
+              - namespace
+              type: object
+          required:
+          - forProvider
+          type: object
+          x-kubernetes-validations:
+          - message: spec.forProvider.key is a required parameter
+            rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+              || ''Update'' in self.managementPolicies) || has(self.forProvider.key)
+              || (has(self.initProvider) && has(self.initProvider.key))'
+        status:
+          description: ObjectStatus defines the observed state of Object.
+          properties:
+            atProvider:
+              properties:
+                accessKey:
+                  description: The access key to use when applying changes. If omitted,
+                    the storage_access_key specified in the provider config (explicitly
+                    or within shared_credentials_file) is used.
+                  type: string
+                acl:
+                  description: The predefined ACL to apply. Defaults to private.
+                  type: string
+                bucket:
+                  description: The name of the containing bucket.
+                  type: string
+                content:
+                  description: Literal string value to use as the object content,
+                    which will be uploaded as UTF-8-encoded text.
+                  type: string
+                contentBase64:
+                  description: Base64-encoded data that will be decoded and uploaded
+                    as raw bytes for the object content. This allows safely uploading
+                    non-UTF8 binary data, but is recommended only for small content
+                    such as the result of the gzipbase64 function with small text
+                    strings. For larger objects, use source to stream the content
+                    from a disk file.
+                  type: string
+                contentType:
+                  description: A standard MIME type describing the format of the
+                    object data, e.g. application/octet-stream. All Valid MIME Types
+                    are valid for this input.
+                  type: string
+                id:
+                  description: The key of the resource.
+                  type: string
+                key:
+                  description: The name of the object once it is in the bucket.
+                  type: string
+                objectLockLegalHoldStatus:
+                  description: Specifies a legal hold status of an object. Requires
+                    object_lock_configuration to be enabled on a bucket.
+                  type: string
+                objectLockMode:
+                  description: Specifies a type of object lock. One of ["GOVERNANCE",
+                    "COMPLIANCE"]. It must be set simultaneously with object_lock_retain_until_date.
+                    Requires object_lock_configuration to be enabled on a bucket.
+                  type: string
+                objectLockRetainUntilDate:
+                  description: Specifies date and time in RFC3339 format until which
+                    an object is to be locked. It must be set simultaneously with
+                    object_lock_mode. Requires object_lock_configuration to be enabled
+                    on a bucket.
+                  type: string
+                source:
+                  description: The path to a file that will be read and uploaded
+                    as raw bytes for the object content.
+                  type: string
+                sourceHash:
+                  description: Used to trigger object update when the source content
+                    changes. So the only meaningful value is filemd5("path/to/source")
+                    (The value is only stored in state and not saved by Yandex Storage).
+                  type: string
+                tags:
+                  additionalProperties:
+                    type: string
+                  description: Specifies object tags.
+                  type: object
+                  x-kubernetes-map-type: granular
+              type: object
+            conditions:
+              description: Conditions of the resource.
+              items:
+                description: A Condition that may apply to a resource.
+                properties:
+                  lastTransitionTime:
+                    description: |-
+                      LastTransitionTime is the last time this condition transitioned from one
+                      status to another.
+                    format: date-time
+                    type: string
+                  message:
+                    description: |-
+                      A Message containing details about this condition's last transition from
+                      one status to another, if any.
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/sws.yandex-cloud.upjet.crossplane.io_securityprofiles.yaml b/package/crds/sws.yandex-cloud.upjet.crossplane.io_securityprofiles.yaml new file mode 100644 index 0000000..14a6cce --- /dev/null +++ b/package/crds/sws.yandex-cloud.upjet.crossplane.io_securityprofiles.yaml @@ -0,0 +1,2286 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: securityprofiles.sws.yandex-cloud.upjet.crossplane.io +spec: + group: sws.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SecurityProfile + listKind: SecurityProfileList + plural: securityprofiles + singular: securityprofile + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SecurityProfile is the Schema for the SecurityProfiles API. With + security profiles you can protect your infrastructure from DDoS attacks + at the application level (L7). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SecurityProfileSpec defines the desired state of SecurityProfile
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  advancedRateLimiterProfileId:
+                    description: Advanced rate limiter profile ID to use with this
+                      security profile. Set empty to use default.
+                    type: string
+                  captchaId:
+                    description: Captcha ID to use with this security profile. Set
+                      empty to use default.
+                    type: string
+                  cloudId:
+                    description: ID of the cloud that the security profile belongs
+                      to.
+                    type: string
+                  defaultAction:
+                    description: 'Action to perform if none of the rules matched.
+                      Possible values: ALLOW or DENY.'
+                    type: string
+                  description:
+                    description: Optional description of the security profile.
+                    type: string
+                  folderId:
+                    description: ID of the folder to create a profile in. If omitted,
+                      the provider folder is used.
+                    type: string
+                  folderIdRef:
+                    description: Reference to a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  folderIdSelector:
+                    description: Selector for a Folder in resourcemanager to populate
+                      folderId.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching labels
+                          is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels as key:value pairs. Maximum of 64 per resource. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the security profile. The name is unique + within the folder. 1-50 characters long. + type: string + securityRule: + description: List of security rules. The structure is documented + below. + items: + properties: + description: + description: Optional description of the rule. 0-512 characters + long. + type: string + dryRun: + description: This mode allows you to test your security + profile or a single rule. + type: boolean + name: + description: Name of the rule. The name is unique within + the security profile. 1-50 characters long. + type: string + priority: + description: Determines the priority for checking the incoming + traffic. + type: number + ruleCondition: + description: Rule actions, see Rule actions. The structure + is documented below. + items: + properties: + action: + description: 'Action to perform if this rule matched. + Possible values: ALLOW or DENY.' + type: string + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
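+                                        # Illustrative (values hypothetical): a header
+                                        # condition pairs a header name with one match
+                                        # predicate from the schema above, e.g.:
+                                        #
+                                        #   headers:
+                                        #   - name: User-Agent
+                                        #     value:
+                                        #     - pireRegexMatch: ".*curl.*"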
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + smartProtection: + description: Smart Protection rule, see Smart Protection + rules. The structure is documented below. + items: + properties: + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
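+                                        # Illustrative (CIDR hypothetical): source-IP
+                                        # conditions use geo or IP-range matchers, e.g.:
+                                        #
+                                        #   sourceIp:
+                                        #   - ipRangesMatch:
+                                        #     - ipRanges:
+                                        #       - 10.0.0.0/8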
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + mode: + description: 'Mode of protection. Possible values: + FULL (full protection means that the traffic will + be checked based on ML models and behavioral analysis, + with suspicious requests being sent to SmartCaptcha) + or API (API protection means checking the traffic + based on ML models and behavioral analysis without + sending suspicious requests to SmartCaptcha. The + suspicious requests will be blocked).' + type: string + type: object + type: array + waf: + description: Web Application Firewall (WAF) rule, see WAF + rules. The structure is documented below. + items: + properties: + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
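+                                        # Illustrative: per the mode description above,
+                                        # a Smart Protection rule combines a condition
+                                        # with a mode, e.g.:
+                                        #
+                                        #   smartProtection:
+                                        #   - mode: API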
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + mode: + description: 'Mode of protection. Possible values: + FULL (full protection means that the traffic will + be checked based on ML models and behavioral analysis, + with suspicious requests being sent to SmartCaptcha) + or API (API protection means checking the traffic + based on ML models and behavioral analysis without + sending suspicious requests to SmartCaptcha. The + suspicious requests will be blocked).' + type: string + wafProfileId: + description: ID of WAF profile to use in this rule. + type: string + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + advancedRateLimiterProfileId: + description: Advanced rate limiter profile ID to use with this + security profile. Set empty to use default. + type: string + captchaId: + description: Captcha ID to use with this security profile. Set + empty to use default. + type: string + cloudId: + description: ID of the security profile. + type: string + defaultAction: + description: 'Action to perform if none of rules matched. 
Possible + values: ALLOW or DENY.' + type: string + description: + description: Optional description of the security profile. + type: string + folderId: + description: ID of the folder to create a profile in. If omitted, + the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels as key:value pairs. Maximum of 64 per resource. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the security profile. The name is unique + within the folder. 1-50 characters long. + type: string + securityRule: + description: List of security rules. The structure is documented + below. + items: + properties: + description: + description: Optional description of the rule. 0-512 characters + long. + type: string + dryRun: + description: This mode allows you to test your security + profile or a single rule. + type: boolean + name: + description: Name of the rule. The name is unique within + the security profile. 1-50 characters long. + type: string + priority: + description: Determines the priority for checking the incoming + traffic. + type: number + ruleCondition: + description: Rule actions, see Rule actions. The structure + is documented below. + items: + properties: + action: + description: 'Action to perform if this rule matched. + Possible values: ALLOW or DENY.' 
+ type: string + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + smartProtection: + description: Smart Protection rule, see Smart Protection + rules. The structure is documented below. + items: + properties: + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + mode: + description: 'Mode of protection. Possible values: + FULL (full protection means that the traffic will + be checked based on ML models and behavioral analysis, + with suspicious requests being sent to SmartCaptcha) + or API (API protection means checking the traffic + based on ML models and behavioral analysis without + sending suspicious requests to SmartCaptcha. The + suspicious requests will be blocked).' + type: string + type: object + type: array + waf: + description: Web Application Firewall (WAF) rule, see WAF + rules. The structure is documented below. + items: + properties: + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + mode: + description: 'Mode of protection. Possible values: + FULL (full protection means that the traffic will + be checked based on ML models and behavioral analysis, + with suspicious requests being sent to SmartCaptcha) + or API (API protection means checking the traffic + based on ML models and behavioral analysis without + sending suspicious requests to SmartCaptcha. The + suspicious requests will be blocked).' + type: string + wafProfileId: + description: ID of WAF profile to use in this rule. + type: string + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
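+                    # Illustrative (name hypothetical): pointing the resource at a
+                    # non-default ProviderConfig:
+                    #
+                    #   providerConfigRef:
+                    #     name: yc-prod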
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SecurityProfileStatus defines the observed state of SecurityProfile. + properties: + atProvider: + properties: + advancedRateLimiterProfileId: + description: Advanced rate limiter profile ID to use with this + security profile. Set empty to use default. + type: string + captchaId: + description: Captcha ID to use with this security profile. Set + empty to use default. + type: string + cloudId: + description: ID of the security profile. + type: string + createdAt: + description: The Security Profile creation timestamp. + type: string + defaultAction: + description: 'Action to perform if none of rules matched. Possible + values: ALLOW or DENY.' + type: string + description: + description: Optional description of the security profile. + type: string + folderId: + description: ID of the folder to create a profile in. If omitted, + the provider folder is used. + type: string + id: + description: ID of the security profile. + type: string + labels: + additionalProperties: + type: string + description: Labels as key:value pairs. Maximum of 64 per resource. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the security profile. The name is unique + within the folder. 1-50 characters long. + type: string + securityRule: + description: List of security rules. The structure is documented + below. + items: + properties: + description: + description: Optional description of the rule. 0-512 characters + long. + type: string + dryRun: + description: This mode allows you to test your security + profile or a single rule. + type: boolean + name: + description: Name of the rule. The name is unique within + the security profile. 1-50 characters long. + type: string + priority: + description: Determines the priority for checking the incoming + traffic. + type: number + ruleCondition: + description: Rule actions, see Rule actions. The structure + is documented below. + items: + properties: + action: + description: 'Action to perform if this rule matched. + Possible values: ALLOW or DENY.' + type: string + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + smartProtection: + description: Smart Protection rule, see Smart Protection + rules. The structure is documented below. + items: + properties: + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + mode: + description: 'Mode of protection. Possible values: + FULL (full protection means that the traffic will + be checked based on ML models and behavioral analysis, + with suspicious requests being sent to SmartCaptcha) + or API (API protection means checking the traffic + based on ML models and behavioral analysis without + sending suspicious requests to SmartCaptcha. The + suspicious requests will be blocked).' + type: string + type: object + type: array + waf: + description: Web Application Firewall (WAF) rule, see WAF + rules. The structure is documented below. + items: + properties: + condition: + description: The condition for matching the rule. + You can find all possibilities of condition in gRPC + specs. + items: + properties: + authority: + items: + properties: + authorities: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + headers: + items: + properties: + name: + description: Name of the rule. The name + is unique within the security profile. + 1-50 characters long. 
+ type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + httpMethod: + items: + properties: + httpMethods: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + requestUri: + items: + properties: + path: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + queries: + items: + properties: + key: + type: string + value: + items: + properties: + exactMatch: + type: string + exactNotMatch: + type: string + pireRegexMatch: + type: string + pireRegexNotMatch: + type: string + prefixMatch: + type: string + prefixNotMatch: + type: string + type: object + type: array + type: object + type: array + type: object + type: array + sourceIp: + items: + properties: + geoIpMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + geoIpNotMatch: + items: + properties: + locations: + items: + type: string + type: array + type: object + type: array + ipRangesMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + ipRangesNotMatch: + items: + properties: + ipRanges: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + mode: + description: 'Mode of protection. Possible values: + FULL (full protection means that the traffic will + be checked based on ML models and behavioral analysis, + with suspicious requests being sent to SmartCaptcha) + or API (API protection means checking the traffic + based on ML models and behavioral analysis without + sending suspicious requests to SmartCaptcha. The + suspicious requests will be blocked).' + type: string + wafProfileId: + description: ID of WAF profile to use in this rule. + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to
+                      a resource at any point in time.
+                    type: string
+                required:
+                - lastTransitionTime
+                - reason
+                - status
+                - type
+                type: object
+              type: array
+              x-kubernetes-list-map-keys:
+              - type
+              x-kubernetes-list-type: map
+              observedGeneration:
+                description: |-
+                  ObservedGeneration is the latest metadata.generation
+                  which resulted in either a ready state, or stalled due to error
+                  it can not recover from without human intervention.
+                format: int64
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_addresses.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_addresses.yaml
new file mode 100644
index 0000000..db50587
--- /dev/null
+++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_addresses.yaml
@@ -0,0 +1,658 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: addresses.vpc.yandex-cloud.upjet.crossplane.io
+spec:
+  group: vpc.yandex-cloud.upjet.crossplane.io
+  names:
+    categories:
+    - crossplane
+    - managed
+    - yandex-cloud
+    kind: Address
+    listKind: AddressList
+    plural: addresses
+    singular: address
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Synced')].status
+      name: SYNCED
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: READY
+      type: string
+    - jsonPath: .metadata.annotations.crossplane\.io/external-name
+      name: EXTERNAL-NAME
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: Address is the Schema for the Addresses API. Manages a VPC address
+          within Yandex.Cloud.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: AddressSpec defines the desired state of Address
+            properties:
+              deletionPolicy:
+                default: Delete
+                description: |-
+                  DeletionPolicy specifies what will happen to the underlying external
+                  when this managed resource is deleted - either "Delete" or "Orphan" the
+                  external resource.
+                  This field is planned to be deprecated in favor of the ManagementPolicies
+                  field in a future release. Currently, both could be set independently and
+                  non-default values would be honored if the feature flag is enabled.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                enum:
+                - Orphan
+                - Delete
+                type: string
+              forProvider:
+                properties:
+                  deletionProtection:
+                    description: Flag that protects the address from accidental deletion.
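# A minimal sketch of a SecurityProfile manifest against the schema generated
# above. Only the status (atProvider) side of that schema appears in this
# hunk, so the spec.forProvider fields below assume the usual upjet mirroring
# of atProvider; the apiVersion group and every concrete value are
# hypothetical placeholders, not values taken from this diff.
apiVersion: sws.yandex-cloud.upjet.crossplane.io/v1alpha1  # group assumed
kind: SecurityProfile
metadata:
  name: example-security-profile
spec:
  forProvider:
    name: example-security-profile
    defaultAction: DENY                  # per the schema: ALLOW or DENY
    securityRule:
    - name: allow-office-range
      priority: 10
      dryRun: true                       # evaluate the rule without enforcing it
      ruleCondition:
      - action: ALLOW
        condition:
        - sourceIp:
          - ipRangesMatch:
            - ipRanges:
              - 203.0.113.0/24           # placeholder CIDR
    - name: smart-protection
      priority: 20
      smartProtection:
      - mode: FULL                       # FULL or API per the schema
  providerConfigRef:
    name: default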
+ type: boolean + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + dnsRecord: + description: DNS record specification of address + items: + properties: + dnsZoneId: + description: DNS zone id to create record at. + type: string + fqdn: + description: FQDN for record to address + type: string + ptr: + description: If PTR record is needed + type: boolean + ttl: + description: TTL of DNS record + type: number + type: object + type: array + externalIpv4Address: + description: spec of IP v4 address + items: + properties: + ddosProtectionProvider: + description: 'Enable DDOS protection. Possible values are: + "qrator"' + type: string + outgoingSmtpCapability: + description: Wanted outgoing smtp capability. + type: string + zoneId: + description: Zone for allocating address. + type: string + type: object + type: array + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this resource. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the address. Provided by the client when + the address is created. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deletionProtection: + description: Flag that protects the address from accidental deletion. + type: boolean + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + dnsRecord: + description: DNS record specification of address + items: + properties: + dnsZoneId: + description: DNS zone id to create record at. + type: string + fqdn: + description: FQDN for record to address + type: string + ptr: + description: If PTR record is needed + type: boolean + ttl: + description: TTL of DNS record + type: number + type: object + type: array + externalIpv4Address: + description: spec of IP v4 address + items: + properties: + ddosProtectionProvider: + description: 'Enable DDOS protection. Possible values are: + "qrator"' + type: string + outgoingSmtpCapability: + description: Wanted outgoing smtp capability. + type: string + zoneId: + description: Zone for allocating address. + type: string + type: object + type: array + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this resource. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the address. Provided by the client when + the address is created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AddressStatus defines the observed state of Address. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the key. + type: string + deletionProtection: + description: Flag that protects the address from accidental deletion. + type: boolean + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + dnsRecord: + description: DNS record specification of address + items: + properties: + dnsZoneId: + description: DNS zone id to create record at. 
+ type: string + fqdn: + description: FQDN for record to address + type: string + ptr: + description: If PTR record is needed + type: boolean + ttl: + description: TTL of DNS record + type: number + type: object + type: array + externalIpv4Address: + description: spec of IP v4 address + items: + properties: + address: + description: Allocated IP address. + type: string + ddosProtectionProvider: + description: 'Enable DDOS protection. Possible values are: + "qrator"' + type: string + outgoingSmtpCapability: + description: Wanted outgoing smtp capability. + type: string + zoneId: + description: Zone for allocating address. + type: string + type: object + type: array + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: Labels to apply to this resource. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the address. Provided by the client when + the address is created. + type: string + reserved: + description: false means that address is ephemeral. + type: boolean + used: + description: true if address is used. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_defaultsecuritygroups.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_defaultsecuritygroups.yaml new file mode 100644 index 0000000..8dfc667 --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_defaultsecuritygroups.yaml @@ -0,0 +1,973 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: defaultsecuritygroups.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DefaultSecurityGroup + listKind: DefaultSecurityGroupList + plural: defaultsecuritygroups + singular: defaultsecuritygroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DefaultSecurityGroup is the Schema for the DefaultSecurityGroups + API. Yandex VPC Default Security Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DefaultSecurityGroupSpec defines the desired state of DefaultSecurityGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the security group. + type: string + egress: + description: A list of egress rules. The structure is documented + below. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. 
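# A minimal sketch of an Address manifest against the Address CRD defined
# above; all names, the DNS zone ID, and the availability zone below are
# placeholder values, not taken from this diff.
apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Address
metadata:
  name: example-address
spec:
  forProvider:
    description: Static external IPv4 address for the demo stack
    externalIpv4Address:
    - zoneId: ru-central1-a              # assumed zone name
    dnsRecord:
    - dnsZoneId: example-dns-zone-id     # placeholder DNS zone ID
      fqdn: demo.example.com.
      ttl: 300
      ptr: true
    folderIdRef:
      name: example-folder               # a Folder managed resource
    labels:
      env: demo
  providerConfigRef:
    name: default
# folderIdRef resolves folderId from the named Folder resource; the
# folderIdSelector variant instead matches Folders by labels, optionally
# restricted to the same composite via matchControllerRef.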
+ type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + folderId: + description: ID of the folder this security group belongs to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ingress: + description: A list of ingress rules. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. 
+ type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + networkId: + description: ID of the network this security group belongs to. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the security group. + type: string + egress: + description: A list of egress rules. The structure is documented + below. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + folderId: + description: ID of the folder this security group belongs to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ingress: + description: A list of ingress rules. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + networkId: + description: ID of the network this security group belongs to. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DefaultSecurityGroupStatus defines the observed state of + DefaultSecurityGroup. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of this security group. + type: string + description: + description: Description of the security group. + type: string + egress: + description: A list of egress rules. The structure is documented + below. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + id: + description: Id of the security group. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. 
+ type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + folderId: + description: ID of the folder this security group belongs to. + type: string + id: + description: Id of the security group. + type: string + ingress: + description: A list of ingress rules. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + id: + description: Id of the security group. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of this security group. + type: string + networkId: + description: ID of the network this security group belongs to. + type: string + status: + description: Status of this security group. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_gateways.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_gateways.yaml new file mode 100644 index 0000000..258c686 --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_gateways.yaml @@ -0,0 +1,553 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: gateways.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Gateway + listKind: GatewayList + plural: gateways + singular: gateway + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Gateway is the Schema for the Gateways API. Manages a gateway + within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GatewaySpec defines the desired state of Gateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
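For reference, a minimal manifest for the DefaultSecurityGroup CRD that closes above might look like the following. This is an illustrative sketch only: the spec.forProvider rule fields are assumed to mirror the status.atProvider schema listed earlier (the usual upjet pattern), the group/kind pairing is inferred from the surrounding vpc CRDs, and all names and CIDR values are hypothetical.

apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: DefaultSecurityGroup
metadata:
  name: example-default-sg          # hypothetical object name
spec:
  forProvider:
    networkIdRef:
      name: example-network         # hypothetical Network managed resource
    ingress:
      - protocol: TCP               # one of ANY, TCP, UDP, ICMP, IPV6_ICMP
        description: allow inbound HTTPS
        port: 443                   # single port; use fromPort/toPort for ranges
        v4CidrBlocks:
          - 0.0.0.0/0
    egress:
      - protocol: ANY
        description: allow all outbound traffic
        fromPort: 0
        toPort: 65535
        v4CidrBlocks:
          - 0.0.0.0/0
  providerConfigRef:
    name: default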
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this VPC Gateway. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the VPC Gateway. Provided by the client when + the VPC Gateway is created. + type: string + sharedEgressGateway: + description: Shared egress gateway configuration. Currently empty. + items: + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this VPC Gateway. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the VPC Gateway. Provided by the client when + the VPC Gateway is created. + type: string + sharedEgressGateway: + description: Shared egress gateway configuration. Currently empty. + items: + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: GatewayStatus defines the observed state of Gateway. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the key. + type: string + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: Labels to apply to this VPC Gateway. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the VPC Gateway. Provided by the client when + the VPC Gateway is created. + type: string + sharedEgressGateway: + description: Shared egress gateway configuration. Currently empty. + items: + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_networks.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_networks.yaml new file mode 100644 index 0000000..11e1ccd --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_networks.yaml @@ -0,0 +1,545 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: networks.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Network is the Schema for the Networks API. Manages a network + within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NetworkSpec defines the desired state of Network + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
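A minimal manifest exercising the Gateway schema completed above could look like this sketch. The folder reference and names are hypothetical, and sharedEgressGateway takes an empty object because the schema defines the block with no fields:

apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Gateway
metadata:
  name: example-gateway             # hypothetical object name
spec:
  forProvider:
    name: egress-gateway
    description: shared egress (NAT) gateway
    folderIdRef:
      name: example-folder          # hypothetical resourcemanager Folder resource
    sharedEgressGateway:
      - {}                          # currently an empty configuration block
  providerConfigRef:
    name: default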
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this network. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the network. Provided by the client when + the network is created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this network. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the network. Provided by the client when + the network is created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
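As a concrete illustration of the management policies described above, a resource can be imported in observe-only mode by pairing ManagementPolicies with the external-name annotation surfaced in the printer columns. A sketch, with a hypothetical network ID:

apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Network
metadata:
  name: observed-network
  annotations:
    crossplane.io/external-name: enp0example0000000000   # hypothetical external network ID
spec:
  managementPolicies:
    - Observe                       # observe the external network without creating or updating it
  forProvider: {}
  providerConfigRef:
    name: default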
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: NetworkStatus defines the observed state of Network. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of the key. + type: string + defaultSecurityGroupId: + description: ID of default Security Group of this network. + type: string + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: Labels to apply to this network. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the network. Provided by the client when + the network is created. + type: string + subnetIds: + items: + type: string + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_privateendpoints.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_privateendpoints.yaml new file mode 100644 index 0000000..20c4139 --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_privateendpoints.yaml @@ -0,0 +1,941 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: privateendpoints.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PrivateEndpoint + listKind: PrivateEndpointList + plural: privateendpoints + singular: privateendpoint + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PrivateEndpoint is the Schema for the PrivateEndpoints API. Manages + a VPC Private Endpoint within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PrivateEndpointSpec defines the desired state of PrivateEndpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of this resource. Provide + this property when you create the resource. 
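For the Network CRD completed above, a typical fully managed resource might look like the sketch below; names and labels are hypothetical, and the folderIdSelector shows label-based resolution as an alternative to a direct folderIdRef:

apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Network
metadata:
  name: example-network
spec:
  forProvider:
    name: main-network
    description: primary VPC network
    labels:
      env: dev                      # hypothetical label on the network itself
    folderIdSelector:
      matchLabels:
        team: platform              # selects a Folder managed resource by label (hypothetical)
  providerConfigRef:
    name: default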
+ type: string + dnsOptions: + description: Private endpoint DNS options block. + items: + properties: + privateDnsRecordsEnabled: + description: If enabled - additional service dns will be + created. + type: boolean + type: object + type: array + endpointAddress: + description: Private endpoint address specification block. + items: + properties: + address: + description: Specifies IP address within subnet_id. + type: string + addressId: + description: ID of the address. + type: string + subnetId: + description: Subnet of the IP address. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this resource. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the private endpoint. Provided by the client + when the private endpoint is created. + type: string + networkId: + description: ID of the network which private endpoint belongs + to. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectStorage: + items: + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of this resource. Provide + this property when you create the resource. + type: string + dnsOptions: + description: Private endpoint DNS options block. + items: + properties: + privateDnsRecordsEnabled: + description: If enabled - additional service dns will be + created. + type: boolean + type: object + type: array + endpointAddress: + description: Private endpoint address specification block. + items: + properties: + address: + description: Specifies IP address within subnet_id. + type: string + addressId: + description: ID of the address. + type: string + subnetId: + description: Subnet of the IP address. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + folderId: + description: ID of the folder that the resource belongs to. If + it is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to apply to this resource. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the private endpoint. Provided by the client + when the private endpoint is created. + type: string + networkId: + description: ID of the network which private endpoint belongs + to. 
+ type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectStorage: + items: + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+                    type: string
+                required:
+                - name
+                type: object
+              writeConnectionSecretToRef:
+                description: |-
+                  WriteConnectionSecretToReference specifies the namespace and name of a
+                  Secret to which any connection details for this managed resource should
+                  be written. Connection details frequently include the endpoint, username,
+                  and password required to connect to the managed resource.
+                  This field is planned to be replaced in a future release in favor of
+                  PublishConnectionDetailsTo. Currently, both could be set independently
+                  and connection details would be published to both without affecting
+                  each other.
+                properties:
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - name
+                - namespace
+                type: object
+            required:
+            - forProvider
+            type: object
+            x-kubernetes-validations:
+            - message: spec.forProvider.objectStorage is a required parameter
+              rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+                || ''Update'' in self.managementPolicies) || has(self.forProvider.objectStorage)
+                || (has(self.initProvider) && has(self.initProvider.objectStorage))'
+          status:
+            description: PrivateEndpointStatus defines the observed state of PrivateEndpoint.
+            properties:
+              atProvider:
+                properties:
+                  createdAt:
+                    description: Creation timestamp of the private endpoint.
+                    type: string
+                  description:
+                    description: An optional description of this resource. Provide
+                      this property when you create the resource.
+                    type: string
+                  dnsOptions:
+                    description: Private endpoint DNS options block.
+                    items:
+                      properties:
+                        privateDnsRecordsEnabled:
+                          description: If enabled, additional service DNS will be
+                            created.
+                          type: boolean
+                      type: object
+                    type: array
+                  endpointAddress:
+                    description: Private endpoint address specification block.
+                    items:
+                      properties:
+                        address:
+                          description: Specifies IP address within subnet_id.
+                          type: string
+                        addressId:
+                          description: ID of the address.
+                          type: string
+                        subnetId:
+                          description: Subnet of the IP address.
+                          type: string
+                      type: object
+                    type: array
+                  folderId:
+                    description: ID of the folder that the resource belongs to. If
+                      it is not provided, the default provider folder is used.
+                    type: string
+                  id:
+                    type: string
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: Labels to apply to this resource. A list of key/value
+                      pairs.
+                    type: object
+                    x-kubernetes-map-type: granular
+                  name:
+                    description: Name of the private endpoint. Provided by the client
+                      when the private endpoint is created.
+                    type: string
+                  networkId:
+                    description: ID of the network which the private endpoint
+                      belongs to.
+                    type: string
+                  objectStorage:
+                    items:
+                      type: object
+                    type: array
+                  status:
+                    description: Status of the private endpoint.
+                    type: string
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_routetables.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_routetables.yaml new file mode 100644 index 0000000..a2d4893 --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_routetables.yaml @@ -0,0 +1,891 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: routetables.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: RouteTable + listKind: RouteTableList + plural: routetables + singular: routetable + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RouteTable is the Schema for the RouteTables API. A VPC route + table is a virtual version of the traditional route table on router device. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RouteTableSpec defines the desired state of RouteTable + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of the route table. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this route table. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the route table. Provided by the client when + the route table is created. + type: string + networkId: + description: ID of the network this route table belongs to. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  networkIdSelector:
+                    description: Selector for a Network to populate networkId.
+                    properties:
+                      matchControllerRef:
+                        description: |-
+                          MatchControllerRef ensures an object with the same controller reference
+                          as the selecting object is selected.
+                        type: boolean
+                      matchLabels:
+                        additionalProperties:
+                          type: string
+                        description: MatchLabels ensures an object with matching labels
+                          is selected.
+                        type: object
+                      policy:
+                        description: Policies for selection.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: |-
+                              Resolution specifies whether resolution of this reference is required.
+                              The default is 'Required', which means the reconcile will fail if the
+                              reference cannot be resolved. 'Optional' means this reference will be
+                              a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: |-
+                              Resolve specifies when this reference should be resolved. The default
+                              is 'IfNotPresent', which will attempt to resolve the reference only when
+                              the corresponding field is not present. Use 'Always' to resolve the
+                              reference on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    type: object
+                  staticRoute:
+                    description: A list of static route records for the route table.
+                      The structure is documented below.
+                    items:
+                      properties:
+                        destinationPrefix:
+                          description: Route prefix in CIDR notation.
+                          type: string
+                        gatewayId:
+                          description: ID of the gateway used as next hop.
+                          type: string
+                        gatewayIdRef:
+                          description: Reference to a Gateway to populate gatewayId.
+                          properties:
+                            name:
+                              description: Name of the referenced object.
+                              type: string
+                            policy:
+                              description: Policies for referencing.
+                              properties:
+                                resolution:
+                                  default: Required
+                                  description: |-
+                                    Resolution specifies whether resolution of this reference is required.
+                                    The default is 'Required', which means the reconcile will fail if the
+                                    reference cannot be resolved. 'Optional' means this reference will be
+                                    a no-op if it cannot be resolved.
+                                  enum:
+                                  - Required
+                                  - Optional
+                                  type: string
+                                resolve:
+                                  description: |-
+                                    Resolve specifies when this reference should be resolved. The default
+                                    is 'IfNotPresent', which will attempt to resolve the reference only when
+                                    the corresponding field is not present. Use 'Always' to resolve the
+                                    reference on every reconcile.
+                                  enum:
+                                  - Always
+                                  - IfNotPresent
+                                  type: string
+                              type: object
+                          required:
+                          - name
+                          type: object
+                        gatewayIdSelector:
+                          description: Selector for a Gateway to populate gatewayId.
+                          properties:
+                            matchControllerRef:
+                              description: |-
+                                MatchControllerRef ensures an object with the same controller reference
+                                as the selecting object is selected.
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + nextHopAddress: + description: Address of the next hop. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional description of the route table. Provide + this property when you create the resource. + type: string + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this route table. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the route table. Provided by the client when + the route table is created. + type: string + networkId: + description: ID of the network this route table belongs to. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + staticRoute: + description: A list of static route records for the route table. + The structure is documented below. + items: + properties: + destinationPrefix: + description: Route prefix in CIDR notation. 
+                          type: string
+                        gatewayId:
+                          description: ID of the gateway used as next hop.
+                          type: string
+                        gatewayIdRef:
+                          description: Reference to a Gateway to populate gatewayId.
+                          properties:
+                            name:
+                              description: Name of the referenced object.
+                              type: string
+                            policy:
+                              description: Policies for referencing.
+                              properties:
+                                resolution:
+                                  default: Required
+                                  description: |-
+                                    Resolution specifies whether resolution of this reference is required.
+                                    The default is 'Required', which means the reconcile will fail if the
+                                    reference cannot be resolved. 'Optional' means this reference will be
+                                    a no-op if it cannot be resolved.
+                                  enum:
+                                  - Required
+                                  - Optional
+                                  type: string
+                                resolve:
+                                  description: |-
+                                    Resolve specifies when this reference should be resolved. The default
+                                    is 'IfNotPresent', which will attempt to resolve the reference only when
+                                    the corresponding field is not present. Use 'Always' to resolve the
+                                    reference on every reconcile.
+                                  enum:
+                                  - Always
+                                  - IfNotPresent
+                                  type: string
+                              type: object
+                          required:
+                          - name
+                          type: object
+                        gatewayIdSelector:
+                          description: Selector for a Gateway to populate gatewayId.
+                          properties:
+                            matchControllerRef:
+                              description: |-
+                                MatchControllerRef ensures an object with the same controller reference
+                                as the selecting object is selected.
+                              type: boolean
+                            matchLabels:
+                              additionalProperties:
+                                type: string
+                              description: MatchLabels ensures an object with matching
+                                labels is selected.
+                              type: object
+                            policy:
+                              description: Policies for selection.
+                              properties:
+                                resolution:
+                                  default: Required
+                                  description: |-
+                                    Resolution specifies whether resolution of this reference is required.
+                                    The default is 'Required', which means the reconcile will fail if the
+                                    reference cannot be resolved. 'Optional' means this reference will be
+                                    a no-op if it cannot be resolved.
+                                  enum:
+                                  - Required
+                                  - Optional
+                                  type: string
+                                resolve:
+                                  description: |-
+                                    Resolve specifies when this reference should be resolved. The default
+                                    is 'IfNotPresent', which will attempt to resolve the reference only when
+                                    the corresponding field is not present. Use 'Always' to resolve the
+                                    reference on every reconcile.
+                                  enum:
+                                  - Always
+                                  - IfNotPresent
+                                  type: string
+                              type: object
+                          type: object
+                        nextHopAddress:
+                          description: Address of the next hop.
+                          type: string
+                      type: object
+                    type: array
+                type: object
+              managementPolicies:
+                default:
+                - '*'
+                description: |-
+                  THIS IS A BETA FIELD. It is on by default but can be opted out
+                  through a Crossplane feature flag.
+                  ManagementPolicies specify the array of actions Crossplane is allowed to
+                  take on the managed and external resources.
+                  This field is planned to replace the DeletionPolicy field in a future
+                  release. Currently, both could be set independently and non-default
+                  values would be honored if the feature flag is enabled. If both are
+                  custom, the DeletionPolicy field will be ignored.
+                  See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                  and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
+                items:
+                  description: |-
+                    A ManagementAction represents an action that the Crossplane controllers
+                    can take on an external resource.
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+                    type: string
+                required:
+                - name
+                type: object
+              writeConnectionSecretToRef:
+                description: |-
+                  WriteConnectionSecretToReference specifies the namespace and name of a
+                  Secret to which any connection details for this managed resource should
+                  be written. Connection details frequently include the endpoint, username,
+                  and password required to connect to the managed resource.
+                  This field is planned to be replaced in a future release in favor of
+                  PublishConnectionDetailsTo. Currently, both could be set independently
+                  and connection details would be published to both without affecting
+                  each other.
+                properties:
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - name
+                - namespace
+                type: object
+            required:
+            - forProvider
+            type: object
+          status:
+            description: RouteTableStatus defines the observed state of RouteTable.
+            properties:
+              atProvider:
+                properties:
+                  createdAt:
+                    description: Creation timestamp of the route table.
+                    type: string
+                  description:
+                    description: An optional description of the route table. Provide
+                      this property when you create the resource.
+                    type: string
+                  folderId:
+                    description: The ID of the folder to which the resource belongs.
+                      If omitted, the provider folder is used.
+                    type: string
+                  id:
+                    type: string
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: Labels to assign to this route table. A list of key/value
+                      pairs.
+                    type: object
+                    x-kubernetes-map-type: granular
+                  name:
+                    description: Name of the route table. Provided by the client when
+                      the route table is created.
+                    type: string
+                  networkId:
+                    description: ID of the network this route table belongs to.
+                    type: string
+                  staticRoute:
+                    description: A list of static route records for the route table.
+                      The structure is documented below.
+                    items:
+                      properties:
+                        destinationPrefix:
+                          description: Route prefix in CIDR notation.
+                          type: string
+                        gatewayId:
+                          description: ID of the gateway used as next hop.
+                          type: string
+                        nextHopAddress:
+                          description: Address of the next hop.
+                          type: string
+                      type: object
+                    type: array
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        LastTransitionTime is the last time this condition transitioned from one
+                        status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        A Message containing details about this condition's last transition from
+                        one status to another, if any.
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      type: integer
+                    reason:
+                      description: A Reason for this condition's last transition from
+                        one status to another.
+                      type: string
+                    status:
+                      description: Status of this condition; is it currently True,
+                        False, or Unknown?
+                      type: string
+                    type:
+                      description: |-
+                        Type of this condition. At most one of each condition type may apply to
+                        a resource at any point in time.
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_securitygrouprules.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_securitygrouprules.yaml new file mode 100644 index 0000000..8ad2bac --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_securitygrouprules.yaml @@ -0,0 +1,621 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: securitygrouprules.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SecurityGroupRule + listKind: SecurityGroupRuleList + plural: securitygrouprules + singular: securitygrouprule + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SecurityGroupRule is the Schema for the SecurityGroupRules API. + Yandex VPC Security Group Rule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecurityGroupRuleSpec defines the desired state of SecurityGroupRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the rule. + type: string + direction: + description: direction of the rule. 
Can be ingress (inbound) or + egress (outbound). + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets such as "self_security_group". + See docs for possible options. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupBinding: + description: ID of the security group this rule belongs to. + type: string + securityGroupBindingRef: + description: Reference to a SecurityGroup to populate securityGroupBinding. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + securityGroupBindingSelector: + description: Selector for a SecurityGroup to populate securityGroupBinding. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. v6_cidr_blocks + argument is currently not supported. It will be available in + the future. + items: + type: string + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the rule. + type: string + direction: + description: direction of the rule. Can be ingress (inbound) or + egress (outbound). + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets such as "self_security_group". + See docs for possible options. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupBinding: + description: ID of the security group this rule belongs to. + type: string + securityGroupBindingRef: + description: Reference to a SecurityGroup to populate securityGroupBinding. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + securityGroupBindingSelector: + description: Selector for a SecurityGroup to populate securityGroupBinding. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. v6_cidr_blocks + argument is currently not supported. It will be available in + the future. + items: + type: string + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.direction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.direction) + || (has(self.initProvider) && has(self.initProvider.direction))' + status: + description: SecurityGroupRuleStatus defines the observed state of SecurityGroupRule. + properties: + atProvider: + properties: + description: + description: Description of the rule. + type: string + direction: + description: direction of the rule. Can be ingress (inbound) or + egress (outbound). + type: string + fromPort: + description: Minimum port number. + type: number + id: + description: Id of the rule. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets such as "self_security_group". 
+ See docs for possible options. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupBinding: + description: ID of the security group this rule belongs to. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. v6_cidr_blocks + argument is currently not supported. It will be available in + the future. + items: + type: string + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_securitygroups.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_securitygroups.yaml new file mode 100644 index 0000000..9d4009e --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_securitygroups.yaml @@ -0,0 +1,978 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: securitygroups.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SecurityGroup + listKind: SecurityGroupList + plural: securitygroups + singular: securitygroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SecurityGroup is the Schema for the SecurityGroups API. Yandex + VPC Security Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecurityGroupSpec defines the desired state of SecurityGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the security group. + type: string + egress: + description: A list of egress rules. The structure is documented + below. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). 
+ type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + folderId: + description: ID of the folder this security group belongs to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ingress: + description: A list of ingress rules. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). 
+ type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the security group. + type: string + networkId: + description: ID of the network this security group belongs to. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the security group. + type: string + egress: + description: A list of egress rules. The structure is documented + below. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + folderId: + description: ID of the folder this security group belongs to. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ingress: + description: A list of ingress rules. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the security group. + type: string + networkId: + description: ID of the network this security group belongs to. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
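+    # Illustrative sketch: the policy block tunes reference resolution, e.g. to
+    # tolerate a missing target and re-resolve on every reconcile:
+    #   networkIdSelector:
+    #     matchLabels: {team: networking}
+    #     policy:
+    #       resolution: Optional
+    #       resolve: Always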
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. 
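+    # Illustrative sketch: an observe-only SecurityGroup that Crossplane may read
+    # but never create, update, or delete would set:
+    #   managementPolicies: ["Observe"]
+    #   providerConfigRef:
+    #     name: default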
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SecurityGroupStatus defines the observed state of SecurityGroup. + properties: + atProvider: + properties: + createdAt: + description: Creation timestamp of this security group. + type: string + description: + description: Description of the security group. + type: string + egress: + description: A list of egress rules. The structure is documented + below. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + id: + description: Id of the rule. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. 
self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + folderId: + description: ID of the folder this security group belongs to. + type: string + id: + description: ID of this security group. + type: string + ingress: + description: A list of ingress rules. + items: + properties: + description: + description: Description of the rule. + type: string + fromPort: + description: Minimum port number. + type: number + id: + description: Id of the rule. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this rule. + type: object + x-kubernetes-map-type: granular + port: + description: Port number (if applied to a single port). + type: number + predefinedTarget: + description: Special-purpose targets. self_security_group + refers to this particular security group. loadbalancer_healthchecks + represents loadbalancer health check nodes. + type: string + protocol: + description: One of ANY, TCP, UDP, ICMP, IPV6_ICMP. + type: string + securityGroupId: + description: Target security group ID for this rule. + type: string + toPort: + description: Maximum port number. + type: number + v4CidrBlocks: + description: The blocks of IPv4 addresses for this rule. + items: + type: string + type: array + v6CidrBlocks: + description: The blocks of IPv6 addresses for this rule. + v6_cidr_blocks argument is currently not supported. It + will be available in the future. + items: + type: string + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels to assign to this security group. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the security group. + type: string + networkId: + description: ID of the network this security group belongs to. + type: string + status: + description: Status of this security group. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another.
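+    # Illustrative sketch (assumed timestamp): a healthy resource typically
+    # reports conditions such as:
+    #   conditions:
+    #     - type: Ready
+    #       status: "True"
+    #       reason: Available
+    #       lastTransitionTime: "2024-01-01T00:00:00Z"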
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/vpc.yandex-cloud.upjet.crossplane.io_subnets.yaml b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_subnets.yaml new file mode 100644 index 0000000..fe3d456 --- /dev/null +++ b/package/crds/vpc.yandex-cloud.upjet.crossplane.io_subnets.yaml @@ -0,0 +1,823 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: subnets.vpc.yandex-cloud.upjet.crossplane.io +spec: + group: vpc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Subnet + listKind: SubnetList + plural: subnets + singular: subnet + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Subnet is the Schema for the Subnets API. A VPC network is a + virtual version of the traditional physical networks that exist within and + between physical data centers. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubnetSpec defines the desired state of Subnet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional description of the subnet. Provide this + property when you create the resource. + type: string + dhcpOptions: + description: Options for DHCP client. The structure is documented + below. + items: + properties: + domainName: + description: Domain name. + type: string + domainNameServers: + description: Domain name server IP addresses. + items: + type: string + type: array + ntpServers: + description: NTP server IP addresses. + items: + type: string + type: array + type: object + type: array + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this subnet. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the subnet. Provided by the client when the + subnet is created. + type: string + networkId: + description: ID of the network this subnet belongs to. Only networks + that are in the distributed mode can have subnets. 
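+    # Illustrative sketch (assumed addresses): the dhcpOptions block documented
+    # above might be populated as:
+    #   dhcpOptions:
+    #     - domainName: internal.example
+    #       domainNameServers: ["10.0.0.2"]
+    #       ntpServers: ["10.0.0.3"]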
+ type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routeTableId: + description: The ID of the route table to assign to this subnet. + Assigned route table should belong to the same network as this + subnet. + type: string + v4CidrBlocks: + description: A list of blocks of internal IPv4 addresses that + are owned by this subnet. Provide this property when you create + the subnet. For example, 10.0.0.0/22 or 192.168.0.0/16. Blocks + of addresses must be unique and non-overlapping within a network. + Minimum subnet size is /28, and maximum subnet size is /16. + Only IPv4 is supported. + items: + type: string + type: array + zone: + description: Name of the Yandex.Cloud zone for this subnet. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
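+    # Illustrative sketch (assumed names and CIDR): a minimal Subnet manifest
+    # satisfying this spec could look like:
+    #   apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
+    #   kind: Subnet
+    #   metadata:
+    #     name: app-subnet
+    #   spec:
+    #     forProvider:
+    #       zone: ru-central1-a
+    #       v4CidrBlocks: ["10.0.0.0/24"]   # within the /28../16 bounds above
+    #       networkIdRef:
+    #         name: app-network             # hypothetical Network object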
+ properties: + description: + description: An optional description of the subnet. Provide this + property when you create the resource. + type: string + dhcpOptions: + description: Options for DHCP client. The structure is documented + below. + items: + properties: + domainName: + description: Domain name. + type: string + domainNameServers: + description: Domain name server IP addresses. + items: + type: string + type: array + ntpServers: + description: NTP server IP addresses. + items: + type: string + type: array + type: object + type: array + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this subnet. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the subnet. Provided by the client when the + subnet is created. + type: string + networkId: + description: ID of the network this subnet belongs to. Only networks + that are in the distributed mode can have subnets. + type: string + networkIdRef: + description: Reference to a Network to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
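+    # Illustrative sketch: within a Composition, matchControllerRef lets this
+    # Subnet resolve the Network created by the same composite resource:
+    #   networkIdSelector:
+    #     matchControllerRef: true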
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routeTableId: + description: The ID of the route table to assign to this subnet. + Assigned route table should belong to the same network as this + subnet. + type: string + v4CidrBlocks: + description: A list of blocks of internal IPv4 addresses that + are owned by this subnet. Provide this property when you create + the subnet. For example, 10.0.0.0/22 or 192.168.0.0/16. Blocks + of addresses must be unique and non-overlapping within a network. + Minimum subnet size is /28, and maximum subnet size is /16. + Only IPv4 is supported. + items: + type: string + type: array + zone: + description: Name of the Yandex.Cloud zone for this subnet. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
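+    # Illustrative sketch (assumed store name): publishing connection details to
+    # an external secret store per the schema above:
+    #   publishConnectionDetailsTo:
+    #     name: subnet-conn
+    #     configRef:
+    #       name: vault-store           # hypothetical StoreConfig
+    #     metadata:
+    #       labels:
+    #         app: demo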
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.v4CidrBlocks is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.v4CidrBlocks) + || (has(self.initProvider) && has(self.initProvider.v4CidrBlocks))' + status: + description: SubnetStatus defines the observed state of Subnet. + properties: + atProvider: + properties: + createdAt: + type: string + description: + description: An optional description of the subnet. Provide this + property when you create the resource. + type: string + dhcpOptions: + description: Options for DHCP client. The structure is documented + below. + items: + properties: + domainName: + description: Domain name. + type: string + domainNameServers: + description: Domain name server IP addresses. + items: + type: string + type: array + ntpServers: + description: NTP server IP addresses. + items: + type: string + type: array + type: object + type: array + folderId: + description: The ID of the folder to which the resource belongs. + If omitted, the provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this subnet. A list of key/value + pairs. + type: object + x-kubernetes-map-type: granular + name: + description: Name of the subnet. Provided by the client when the + subnet is created. + type: string + networkId: + description: ID of the network this subnet belongs to. Only networks + that are in the distributed mode can have subnets. + type: string + routeTableId: + description: The ID of the route table to assign to this subnet. + Assigned route table should belong to the same network as this + subnet. + type: string + v4CidrBlocks: + description: A list of blocks of internal IPv4 addresses that + are owned by this subnet. Provide this property when you create + the subnet. For example, 10.0.0.0/22 or 192.168.0.0/16. Blocks + of addresses must be unique and non-overlapping within a network. + Minimum subnet size is /28, and maximum subnet size is /16. + Only IPv4 is supported. 
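+    # Illustrative reading of the x-kubernetes-validations rule above: with
+    #   managementPolicies: ["Observe"]
+    # v4CidrBlocks may be omitted, but any policy set containing '*', Create, or
+    # Update requires v4CidrBlocks in either forProvider or initProvider.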
+ items: + type: string + type: array + v6CidrBlocks: + description: An optional list of blocks of IPv6 addresses that + are owned by this subnet. + items: + type: string + type: array + zone: + description: Name of the Yandex.Cloud zone for this subnet. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/yandex-cloud.upjet.crossplane.io_providerconfigs.yaml b/package/crds/yandex-cloud.upjet.crossplane.io_providerconfigs.yaml new file mode 100644 index 0000000..1dee150 --- /dev/null +++ b/package/crds/yandex-cloud.upjet.crossplane.io_providerconfigs.yaml @@ -0,0 +1,173 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: providerconfigs.yandex-cloud.upjet.crossplane.io +spec: + group: yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - provider + - yandex-cloud + kind: ProviderConfig + listKind: ProviderConfigList + plural: providerconfigs + singular: providerconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .spec.credentials.secretRef.name + name: SECRET-NAME + priority: 1 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: A ProviderConfig configures a YandexCloud provider. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: A ProviderConfigSpec defines the desired state of a ProviderConfig. + properties: + credentials: + description: Credentials required to authenticate to this provider. + properties: + env: + description: |- + Env is a reference to an environment variable that contains credentials + that must be used to connect to the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: |- + Fs is a reference to a filesystem location that contains credentials that + must be used to connect to the provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: |- + A SecretRef is a reference to a secret key that contains the credentials + that must be used to connect to the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the provider credentials. + enum: + - None + - Secret + - InjectedIdentity + - Environment + - Filesystem + type: string + required: + - source + type: object + required: + - credentials + type: object + status: + description: A ProviderConfigStatus reflects the observed state of a ProviderConfig. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + users: + description: Users of this provider configuration. 
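+    # Illustrative sketch (assumed secret layout): a ProviderConfig reading
+    # service-account credentials from a Kubernetes Secret:
+    #   apiVersion: yandex-cloud.upjet.crossplane.io/v1beta1
+    #   kind: ProviderConfig
+    #   metadata:
+    #     name: default
+    #   spec:
+    #     credentials:
+    #       source: Secret
+    #       secretRef:
+    #         namespace: crossplane-system
+    #         name: yc-creds            # hypothetical Secret
+    #         key: credentials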
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/yandex-cloud.upjet.crossplane.io_providerconfigusages.yaml b/package/crds/yandex-cloud.upjet.crossplane.io_providerconfigusages.yaml new file mode 100644 index 0000000..82dcdfb --- /dev/null +++ b/package/crds/yandex-cloud.upjet.crossplane.io_providerconfigusages.yaml @@ -0,0 +1,117 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: providerconfigusages.yandex-cloud.upjet.crossplane.io +spec: + group: yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - provider + - yandex-cloud + kind: ProviderConfigUsage + listKind: ProviderConfigUsageList + plural: providerconfigusages + singular: providerconfigusage + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .providerConfigRef.name + name: CONFIG-NAME + type: string + - jsonPath: .resourceRef.kind + name: RESOURCE-KIND + type: string + - jsonPath: .resourceRef.name + name: RESOURCE-NAME + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: A ProviderConfigUsage indicates that a resource is using a ProviderConfig. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + providerConfigRef: + description: ProviderConfigReference to the provider config being used. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceRef: + description: ResourceReference to the managed resource using the provider + config. + properties: + apiVersion: + description: APIVersion of the referenced object. + type: string + kind: + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + uid: + description: UID of the referenced object. 
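+    # Illustrative sketch (assumed names): usages are created by the provider
+    # itself to track which managed resource uses which config, e.g.:
+    #   providerConfigRef:
+    #     name: default
+    #   resourceRef:
+    #     apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1
+    #     kind: Subnet
+    #     name: app-subnet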
+ type: string + required: + - apiVersion + - kind + - name + type: object + required: + - providerConfigRef + - resourceRef + type: object + served: true + storage: true + subresources: {} diff --git a/package/crds/yandex-cloud.upjet.crossplane.io_storeconfigs.yaml b/package/crds/yandex-cloud.upjet.crossplane.io_storeconfigs.yaml new file mode 100644 index 0000000..86d8613 --- /dev/null +++ b/package/crds/yandex-cloud.upjet.crossplane.io_storeconfigs.yaml @@ -0,0 +1,223 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: storeconfigs.yandex-cloud.upjet.crossplane.io +spec: + group: yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - store + - yandex-cloud + kind: StoreConfig + listKind: StoreConfigList + plural: storeconfigs + singular: storeconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .spec.type + name: TYPE + type: string + - jsonPath: .spec.defaultScope + name: DEFAULT-SCOPE + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A StoreConfig configures how the yandex-cloud controller should + store connection details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: A StoreConfigSpec defines the desired state of a StoreConfig. + properties: + defaultScope: + description: |- + DefaultScope used for scoping secrets for "cluster-scoped" resources. + If store type is "Kubernetes", this would mean the default namespace to + store connection secrets for cluster scoped resources. + In case of "Vault", this would be used as the default parent path. + Typically, this should be set to the Crossplane installation namespace. + type: string + kubernetes: + description: |- + Kubernetes configures a Kubernetes secret store. + If the "type" is "Kubernetes" but no config is provided, the in-cluster + config will be used. + properties: + auth: + description: Credentials used to connect to the Kubernetes API. + properties: + env: + description: |- + Env is a reference to an environment variable that contains credentials + that must be used to connect to the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: |- + Fs is a reference to a filesystem location that contains credentials that + must be used to connect to the provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: |- + A SecretRef is a reference to a secret key that contains the credentials + that must be used to connect to the provider. + properties: + key: + description: The key to select.
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the credentials. + enum: + - None + - Secret + - Environment + - Filesystem + type: string + required: + - source + type: object + required: + - auth + type: object + plugin: + description: Plugin configures External secret store as a plugin. + properties: + configRef: + description: ConfigRef contains store config reference info. + properties: + apiVersion: + description: APIVersion of the referenced config. + type: string + kind: + description: Kind of the referenced config. + type: string + name: + description: Name of the referenced config. + type: string + required: + - apiVersion + - kind + - name + type: object + endpoint: + description: Endpoint is the endpoint of the gRPC server. + type: string + type: object + type: + default: Kubernetes + description: |- + Type configures which secret store to be used. Only the configuration + block for this store will be used and others will be ignored if provided. + Default is Kubernetes. + enum: + - Kubernetes + - Vault + - Plugin + type: string + required: + - defaultScope + type: object + status: + description: A StoreConfigStatus represents the status of a StoreConfig. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
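Since a StoreConfig's only required field is spec.defaultScope, and type defaults to Kubernetes (falling back to in-cluster config when no kubernetes block is given, per the field docs above), a minimal sketch of a working object, with a hypothetical name, is:

apiVersion: yandex-cloud.upjet.crossplane.io/v1alpha1
kind: StoreConfig
metadata:
  name: in-cluster                  # hypothetical
spec:
  type: Kubernetes                  # the default; shown for clarity
  defaultScope: crossplane-system   # namespace used for cluster-scoped resources' secrets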
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/yandex.yandex-cloud.upjet.crossplane.io_functions.yaml b/package/crds/yandex.yandex-cloud.upjet.crossplane.io_functions.yaml new file mode 100644 index 0000000..b7595d5 --- /dev/null +++ b/package/crds/yandex.yandex-cloud.upjet.crossplane.io_functions.yaml @@ -0,0 +1,1685 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: functions.yandex.yandex-cloud.upjet.crossplane.io +spec: + group: yandex.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Function + listKind: FunctionList + plural: functions + singular: function + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Function is the Schema for the Functions API. Allows management + of a Yandex Cloud Function. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionSpec defines the desired state of Function + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + asyncInvocation: + description: Config for asynchronous invocations of Yandex Cloud + Function + items: + properties: + retriesCount: + description: Maximum number of retries for async invocation + type: number + serviceAccountId: + description: Service account used for async invocation + type: string + ymqFailureTarget: + description: Target for unsuccessful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + type: object + type: array + ymqSuccessTarget: + description: Target for successful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account used for writing result + to queue + type: string + type: object + type: array + type: object + type: array + concurrency: + description: The maximum number of requests processed by a function + instance at the same time + type: number + connectivity: + description: Function version connectivity. If specified the version + will be attached to specified network + items: + properties: + networkId: + description: Network the version will have access to. It's + essential to specify network with subnets in all availability + zones + type: string + type: object + type: array + content: + description: Version deployment content for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified + items: + properties: + zipFilename: + description: Filename to zip archive for the version + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Function + type: string + entrypoint: + description: Entrypoint for Yandex Cloud Function + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variables for Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + executionTimeout: + description: Execution timeout in seconds for Yandex Cloud Function + type: string + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
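The content and package blocks defined above are mutually exclusive ways to deliver the function code: exactly one of them must be specified. A hedged sketch of each variant (file and bucket names are hypothetical):

# Variant 1: upload a local archive
spec:
  forProvider:
    content:
    - zipFilename: ./function.zip

# Variant 2: point at code already in Object Storage
spec:
  forProvider:
    package:
    - bucketName: my-code-bucket
      objectName: function.zip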
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Function + items: + properties: + disabled: + description: Is logging from function disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: Memory in megabytes (aligned to 128MB) for Yandex + Cloud Function + type: number + mounts: + description: Mounts for Yandex Cloud Function. + items: + properties: + ephemeralDisk: + description: One of the available mount types. Disk available + during the function execution time + items: + properties: + blockSizeKb: + description: Optional block size of the ephemeral + disk in KB + type: number + sizeGb: + description: Size of the ephemeral disk in GB + type: number + type: object + type: array + mode: + description: Mount’s accessibility mode. Valid values are + ro and rw + type: string + name: + description: Yandex Cloud Function name used to define trigger + type: string + objectStorage: + description: One of the available mount types. Object storage + as a mount + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + bucketRef: + description: Reference to a Bucket in storage to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
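Every *Ref/*Selector pair in this schema follows the same cross-resource reference pattern: the Ref names one object, the Selector matches by labels or controller reference, and the policy block tunes resolution. Illustrated with folderId (names hypothetical):

spec:
  forProvider:
    # resolve by name
    folderIdRef:
      name: my-folder
    # or resolve by labels; the policy makes a miss non-fatal
    # folderIdSelector:
    #   matchLabels:
    #     team: platform
    #   policy:
    #     resolution: Optional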
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + prefix: + description: Prefix within the bucket. If you leave + this field empty, the entire bucket will be mounted + type: string + type: object + type: array + type: object + type: array + name: + description: Yandex Cloud Function name used to define trigger + type: string + package: + description: Version deployment package for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified + items: + properties: + bucketName: + description: Name of the bucket that stores the code for + the version + type: string + objectName: + description: Name of the object in the bucket that stores + the code for the version + type: string + sha256: + description: SHA256 hash of the version deployment package + type: string + type: object + type: array + runtime: + description: Runtime for Yandex Cloud Function + type: string + secrets: + description: Secrets for Yandex Cloud Function. + items: + properties: + environmentVariable: + description: Function's environment variable in which secret's + value will be stored + type: string + id: + description: Secret's id + type: string + idRef: + description: Reference to a Secret in lockbox to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Secret in lockbox to populate + id. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Secret's entries key which value will be stored + in environment variable + type: string + versionId: + description: Secret's version id + type: string + versionIdRef: + description: Reference to a SecretVersion in lockbox to + populate versionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionIdSelector: + description: Selector for a SecretVersion in lockbox to + populate versionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
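The secrets block above wires Lockbox entries into the function's environment; id and versionId can be given directly or resolved through Lockbox Secret and SecretVersion managed resources. A sketch with hypothetical names:

spec:
  forProvider:
    secrets:
    - environmentVariable: DB_PASSWORD
      key: password                        # entry key inside the Lockbox secret
      idRef:
        name: my-lockbox-secret            # hypothetical Secret managed resource
      versionIdRef:
        name: my-lockbox-secret-version    # hypothetical SecretVersion managed resource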
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + storageMounts: + description: (DEPRECATED, use mounts.0.object_storage instead) + Storage mounts for Yandex Cloud Function + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + mountPointName: + description: Name of the mount point. The directory where + the bucket is mounted will be accessible at the /function/storage/ + path + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted + type: string + readOnly: + description: Mount the bucket in read-only mode + type: boolean + type: object + type: array + tags: + description: Tags for Yandex Cloud Function. Tag "$latest" isn't + returned + items: + type: string + type: array + x-kubernetes-list-type: set + tmpfsSize: + description: Tmpfs size for Yandex Cloud Function + type: number + userHash: + description: User-defined string for current function version. + User must change this string any times when function changed. + Function will be updated when hash is changed. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + asyncInvocation: + description: Config for asynchronous invocations of Yandex Cloud + Function + items: + properties: + retriesCount: + description: Maximum number of retries for async invocation + type: number + serviceAccountId: + description: Service account used for async invocation + type: string + ymqFailureTarget: + description: Target for unsuccessful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + type: object + type: array + ymqSuccessTarget: + description: Target for successful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account used for writing result + to queue + type: string + type: object + type: array + type: object + type: array + concurrency: + description: The maximum number of requests processed by a function + instance at the same time + type: number + connectivity: + description: Function version connectivity. If specified the version + will be attached to specified network + items: + properties: + networkId: + description: Network the version will have access to. It's + essential to specify network with subnets in all availability + zones + type: string + type: object + type: array + content: + description: Version deployment content for Yandex Cloud Function + code. Can be only one package or content section. 
Either package + or content section must be specified + items: + properties: + zipFilename: + description: Filename to zip archive for the version + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Function + type: string + entrypoint: + description: Entrypoint for Yandex Cloud Function + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variables for Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + executionTimeout: + description: Execution timeout in seconds for Yandex Cloud Function + type: string + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
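As the initProvider description above explains, fields placed there are merged into forProvider only at creation time and then added to the terraform ignore_changes hook. A sketch for a value that some other controller is expected to manage after creation:

spec:
  initProvider:
    labels:
      created-by: crossplane   # set once at creation, never reconciled afterwards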
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Function + items: + properties: + disabled: + description: Is logging from function disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: Memory in megabytes (aligned to 128MB) for Yandex + Cloud Function + type: number + mounts: + description: Mounts for Yandex Cloud Function. + items: + properties: + ephemeralDisk: + description: One of the available mount types. Disk available + during the function execution time + items: + properties: + blockSizeKb: + description: Optional block size of the ephemeral + disk in KB + type: number + sizeGb: + description: Size of the ephemeral disk in GB + type: number + type: object + type: array + mode: + description: Mount’s accessibility mode. Valid values are + ro and rw + type: string + name: + description: Yandex Cloud Function name used to define trigger + type: string + objectStorage: + description: One of the available mount types. Object storage + as a mount + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + bucketRef: + description: Reference to a Bucket in storage to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + prefix: + description: Prefix within the bucket. If you leave + this field empty, the entire bucket will be mounted + type: string + type: object + type: array + type: object + type: array + name: + description: Yandex Cloud Function name used to define trigger + type: string + package: + description: Version deployment package for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified + items: + properties: + bucketName: + description: Name of the bucket that stores the code for + the version + type: string + objectName: + description: Name of the object in the bucket that stores + the code for the version + type: string + sha256: + description: SHA256 hash of the version deployment package + type: string + type: object + type: array + runtime: + description: Runtime for Yandex Cloud Function + type: string + secrets: + description: Secrets for Yandex Cloud Function. + items: + properties: + environmentVariable: + description: Function's environment variable in which secret's + value will be stored + type: string + id: + description: Secret's id + type: string + idRef: + description: Reference to a Secret in lockbox to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Secret in lockbox to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
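The mounts block being defined here supersedes the deprecated storageMounts field that appears later in this schema. A hedged sketch of an Object Storage mount (bucket name and mount name are hypothetical; exact mount-point semantics are as described in the field docs):

spec:
  forProvider:
    mounts:
    - name: assets               # mount point name (hypothetical value)
      mode: ro                   # ro or rw
      objectStorage:
      - bucket: my-bucket
        prefix: static/          # an empty prefix mounts the whole bucket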
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Secret's entries key which value will be stored + in environment variable + type: string + versionId: + description: Secret's version id + type: string + versionIdRef: + description: Reference to a SecretVersion in lockbox to + populate versionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionIdSelector: + description: Selector for a SecretVersion in lockbox to + populate versionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + storageMounts: + description: (DEPRECATED, use mounts.0.object_storage instead) + Storage mounts for Yandex Cloud Function + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + mountPointName: + description: Name of the mount point. The directory where + the bucket is mounted will be accessible at the /function/storage/ + path + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted + type: string + readOnly: + description: Mount the bucket in read-only mode + type: boolean + type: object + type: array + tags: + description: Tags for Yandex Cloud Function. Tag "$latest" isn't + returned + items: + type: string + type: array + x-kubernetes-list-type: set + tmpfsSize: + description: Tmpfs size for Yandex Cloud Function + type: number + userHash: + description: User-defined string for current function version. 
+ User must change this string any times when function changed. + Function will be updated when hash is changed. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
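Both connection-detail sinks described above can be set independently; writeConnectionSecretToRef is the older, namespace-scoped one slated for replacement by publishConnectionDetailsTo. A sketch:

spec:
  writeConnectionSecretToRef:
    name: example-fn-conn        # hypothetical secret name
    namespace: crossplane-system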
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.entrypoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.entrypoint) + || (has(self.initProvider) && has(self.initProvider.entrypoint))' + - message: spec.forProvider.memory is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.memory) + || (has(self.initProvider) && has(self.initProvider.memory))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.runtime is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.runtime) + || (has(self.initProvider) && has(self.initProvider.runtime))' + - message: spec.forProvider.userHash is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userHash) + || (has(self.initProvider) && has(self.initProvider.userHash))' + status: + description: FunctionStatus defines the observed state of Function. + properties: + atProvider: + properties: + asyncInvocation: + description: Config for asynchronous invocations of Yandex Cloud + Function + items: + properties: + retriesCount: + description: Maximum number of retries for async invocation + type: number + serviceAccountId: + description: Service account used for async invocation + type: string + ymqFailureTarget: + description: Target for unsuccessful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + type: object + type: array + ymqSuccessTarget: + description: Target for successful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account used for writing result + to queue + type: string + type: object + type: array + type: object + type: array + concurrency: + description: The maximum number of requests processed by a function + instance at the same time + type: number + connectivity: + description: Function version connectivity. If specified the version + will be attached to specified network + items: + properties: + networkId: + description: Network the version will have access to. It's + essential to specify network with subnets in all availability + zones + type: string + type: object + type: array + content: + description: Version deployment content for Yandex Cloud Function + code. Can be only one package or content section. 
Either package + or content section must be specified + items: + properties: + zipFilename: + description: Filename to zip archive for the version + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the Yandex Cloud Function + type: string + description: + description: Description of the Yandex Cloud Function + type: string + entrypoint: + description: Entrypoint for Yandex Cloud Function + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variables for Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + executionTimeout: + description: Execution timeout in seconds for Yandex Cloud Function + type: string + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + id: + description: Secret's id + type: string + imageSize: + description: Image size for Yandex Cloud Function + type: number + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Function + items: + properties: + disabled: + description: Is logging from function disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: Memory in megabytes (aligned to 128MB) for Yandex + Cloud Function + type: number + mounts: + description: Mounts for Yandex Cloud Function. + items: + properties: + ephemeralDisk: + description: One of the available mount types. Disk available + during the function execution time + items: + properties: + blockSizeKb: + description: Optional block size of the ephemeral + disk in KB + type: number + sizeGb: + description: Size of the ephemeral disk in GB + type: number + type: object + type: array + mode: + description: Mount’s accessibility mode. Valid values are + ro and rw + type: string + name: + description: Yandex Cloud Function name used to define trigger + type: string + objectStorage: + description: One of the available mount types. Object storage + as a mount + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + prefix: + description: Prefix within the bucket. If you leave + this field empty, the entire bucket will be mounted + type: string + type: object + type: array + type: object + type: array + name: + description: Yandex Cloud Function name used to define trigger + type: string + package: + description: Version deployment package for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified + items: + properties: + bucketName: + description: Name of the bucket that stores the code for + the version + type: string + objectName: + description: Name of the object in the bucket that stores + the code for the version + type: string + sha256: + description: SHA256 hash of the version deployment package + type: string + type: object + type: array + runtime: + description: Runtime for Yandex Cloud Function + type: string + secrets: + description: Secrets for Yandex Cloud Function. 
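Note how each CEL rule above only fires when '*', 'Create', or 'Update' appears in managementPolicies, so an observe-only import needs none of the otherwise-required parameters. A sketch (external name hypothetical):

apiVersion: yandex.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Function
metadata:
  name: imported-fn
  annotations:
    crossplane.io/external-name: my-function-id   # ID of the existing function
spec:
  managementPolicies: ["Observe"]
  forProvider: {}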
+ items: + properties: + environmentVariable: + description: Function's environment variable in which secret's + value will be stored + type: string + id: + description: Secret's id + type: string + key: + description: Secret's entries key which value will be stored + in environment variable + type: string + versionId: + description: Secret's version id + type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + storageMounts: + description: (DEPRECATED, use mounts.0.object_storage instead) + Storage mounts for Yandex Cloud Function + items: + properties: + bucket: + description: Name of the mounting bucket + type: string + mountPointName: + description: Name of the mount point. The directory where + the bucket is mounted will be accessible at the /function/storage/ + path + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted + type: string + readOnly: + description: Mount the bucket in read-only mode + type: boolean + type: object + type: array + tags: + description: Tags for Yandex Cloud Function. Tag "$latest" isn't + returned + items: + type: string + type: array + x-kubernetes-list-type: set + tmpfsSize: + description: Tmpfs size for Yandex Cloud Function + type: number + userHash: + description: User-defined string for current function version. + User must change this string any times when function changed. + Function will be updated when hash is changed. + type: string + version: + description: Version for Yandex Cloud Function + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
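Pulling the Function spec together, a minimal manifest that satisfies all five required-parameter rules (name, entrypoint, runtime, memory, userHash) might look like the sketch below; the runtime value, folder reference, and bucket names are illustrative only:

apiVersion: yandex.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Function
metadata:
  name: example-fn
spec:
  forProvider:
    name: example-fn
    entrypoint: index.handler    # hypothetical entrypoint
    runtime: python312           # hypothetical; any runtime Yandex Cloud accepts
    memory: 128                  # megabytes, aligned to 128MB
    userHash: v1                 # change this to roll out a new version
    folderIdRef:
      name: my-folder            # hypothetical Folder managed resource
    package:
    - bucketName: my-code-bucket
      objectName: function.zip
  providerConfigRef:
    name: default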
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databasededicateds.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databasededicateds.yaml new file mode 100644 index 0000000..313749d --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databasededicateds.yaml @@ -0,0 +1,1112 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: databasededicateds.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DatabaseDedicated + listKind: DatabaseDedicatedList + plural: databasededicateds + singular: databasededicated + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DatabaseDedicated is the Schema for the DatabaseDedicateds API. + Manages Yandex Database dedicated cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatabaseDedicatedSpec defines the desired state of DatabaseDedicated + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + assignPublicIps: + description: Whether public IP addresses should be assigned to + the Yandex Database cluster. + type: boolean + deletionProtection: + description: Inhibits deletion of the database. Can be either + true or false + type: boolean + description: + description: A description for the Yandex Database cluster. + type: string + folderId: + description: ID of the folder that the Yandex Database cluster + belongs to. It will be deduced from provider configuration if + not set explicitly. 
+ type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Database cluster. + type: object + x-kubernetes-map-type: granular + location: + description: Location for the Yandex Database cluster. The structure + is documented below. + items: + properties: + region: + description: Region for the Yandex Database cluster. The + structure is documented below. + items: + properties: + id: + description: Region ID for the Yandex Database cluster. + type: string + type: object + type: array + type: object + type: array + locationId: + description: Location ID for the Yandex Database cluster. + type: string + name: + description: Name of the Yandex Database cluster. + type: string + networkId: + description: ID of the network to attach the Yandex Database cluster + to. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourcePresetId: + description: The Yandex Database cluster preset. Available presets + can be obtained via yc ydb resource-preset list command. + type: string + scalePolicy: + description: Scaling policy for the Yandex Database cluster. The + structure is documented below. + items: + properties: + fixedScale: + description: Fixed scaling policy for the Yandex Database + cluster. The structure is documented below. + items: + properties: + size: + description: Number of instances for the Yandex Database + cluster. + type: number + type: object + type: array + type: object + type: array + sleepAfter: + type: number + storageConfig: + description: A list of storage configuration options for the Yandex + Database cluster. The structure is documented below. + items: + properties: + groupCount: + description: Amount of storage groups of selected type for + the Yandex Database cluster. + type: number + storageTypeId: + description: Storage type ID for the Yandex Database cluster. + Available presets can be obtained via yc ydb storage-type + list command. + type: string + type: object + type: array + subnetIds: + description: List of subnet IDs to attach the Yandex Database + cluster to. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in vpc to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + assignPublicIps: + description: Whether public IP addresses should be assigned to + the Yandex Database cluster. + type: boolean + deletionProtection: + description: Inhibits deletion of the database. Can be either + true or false + type: boolean + description: + description: A description for the Yandex Database cluster. + type: string + folderId: + description: ID of the folder that the Yandex Database cluster + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Database cluster. + type: object + x-kubernetes-map-type: granular + location: + description: Location for the Yandex Database cluster. The structure + is documented below. + items: + properties: + region: + description: Region for the Yandex Database cluster. The + structure is documented below. + items: + properties: + id: + description: Region ID for the Yandex Database cluster. + type: string + type: object + type: array + type: object + type: array + locationId: + description: Location ID for the Yandex Database cluster. + type: string + name: + description: Name of the Yandex Database cluster. + type: string + networkId: + description: ID of the network to attach the Yandex Database cluster + to. + type: string + networkIdRef: + description: Reference to a Network in vpc to populate networkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkIdSelector: + description: Selector for a Network in vpc to populate networkId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourcePresetId: + description: The Yandex Database cluster preset. Available presets + can be obtained via yc ydb resource-preset list command. + type: string + scalePolicy: + description: Scaling policy for the Yandex Database cluster. The + structure is documented below. + items: + properties: + fixedScale: + description: Fixed scaling policy for the Yandex Database + cluster. The structure is documented below. + items: + properties: + size: + description: Number of instances for the Yandex Database + cluster. + type: number + type: object + type: array + type: object + type: array + sleepAfter: + type: number + storageConfig: + description: A list of storage configuration options for the Yandex + Database cluster. The structure is documented below. + items: + properties: + groupCount: + description: Amount of storage groups of selected type for + the Yandex Database cluster. + type: number + storageTypeId: + description: Storage type ID for the Yandex Database cluster. + Available presets can be obtained via yc ydb storage-type + list command. + type: string + type: object + type: array + subnetIds: + description: List of subnet IDs to attach the Yandex Database + cluster to. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in vpc to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in vpc to populate + subnetIds. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.resourcePresetId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resourcePresetId) + || (has(self.initProvider) && has(self.initProvider.resourcePresetId))' + - message: spec.forProvider.scalePolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scalePolicy) + || (has(self.initProvider) && has(self.initProvider.scalePolicy))' + - message: spec.forProvider.storageConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageConfig) + || (has(self.initProvider) && has(self.initProvider.storageConfig))' + status: + description: DatabaseDedicatedStatus defines the observed state of DatabaseDedicated. + properties: + atProvider: + properties: + assignPublicIps: + description: Whether public IP addresses should be assigned to + the Yandex Database cluster. + type: boolean + createdAt: + description: The Yandex Database cluster creation timestamp. + type: string + databasePath: + description: Full database path of the Yandex Database cluster. + Useful for SDK configuration. + type: string + deletionProtection: + description: Inhibits deletion of the database. Can be either + true or false + type: boolean + description: + description: A description for the Yandex Database cluster. + type: string + folderId: + description: ID of the folder that the Yandex Database cluster + belongs to. It will be deduced from provider configuration if + not set explicitly. + type: string + id: + description: ID of the Yandex Database cluster. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Database cluster. + type: object + x-kubernetes-map-type: granular + location: + description: Location for the Yandex Database cluster. The structure + is documented below. + items: + properties: + region: + description: Region for the Yandex Database cluster. The + structure is documented below. + items: + properties: + id: + description: Region ID for the Yandex Database cluster. + type: string + type: object + type: array + type: object + type: array + locationId: + description: Location ID for the Yandex Database cluster. + type: string + name: + description: Name of the Yandex Database cluster. + type: string + networkId: + description: ID of the network to attach the Yandex Database cluster + to. + type: string + resourcePresetId: + description: The Yandex Database cluster preset. Available presets + can be obtained via yc ydb resource-preset list command. + type: string + scalePolicy: + description: Scaling policy for the Yandex Database cluster. The + structure is documented below. + items: + properties: + fixedScale: + description: Fixed scaling policy for the Yandex Database + cluster. 
The structure is documented below. + items: + properties: + size: + description: Number of instances for the Yandex Database + cluster. + type: number + type: object + type: array + type: object + type: array + sleepAfter: + type: number + status: + description: Status of the Yandex Database cluster. + type: string + storageConfig: + description: A list of storage configuration options for the Yandex + Database cluster. The structure is documented below. + items: + properties: + groupCount: + description: Amount of storage groups of selected type for + the Yandex Database cluster. + type: number + storageTypeId: + description: Storage type ID for the Yandex Database cluster. + Available presets can be obtained via yc ydb storage-type + list command. + type: string + type: object + type: array + subnetIds: + description: List of subnet IDs to attach the Yandex Database + cluster to. + items: + type: string + type: array + x-kubernetes-list-type: set + tlsEnabled: + description: Whether TLS is enabled for the Yandex Database cluster. + Useful for SDK configuration. + type: boolean + ydbApiEndpoint: + description: API endpoint of the Yandex Database cluster. Useful + for SDK configuration. + type: string + ydbFullEndpoint: + description: Full endpoint of the Yandex Database cluster. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databaseiambindings.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databaseiambindings.yaml new file mode 100644 index 0000000..265b599 --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databaseiambindings.yaml @@ -0,0 +1,683 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: databaseiambindings.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DatabaseIAMBinding + listKind: DatabaseIAMBindingList + plural: databaseiambindings + singular: databaseiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DatabaseIAMBinding is the Schema for the DatabaseIAMBindings + API. Allows management of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatabaseIAMBindingSpec defines the desired state of DatabaseIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + databaseId: + description: The Managed Service YDB instance Database ID to apply + a binding to. + type: string + databaseIdRef: + description: Reference to a DatabaseServerless to populate databaseId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseIdSelector: + description: Selector for a DatabaseServerless to populate databaseId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + databaseId: + description: The Managed Service YDB instance Database ID to apply + a binding to. + type: string + databaseIdRef: + description: Reference to a DatabaseServerless to populate databaseId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseIdSelector: + description: Selector for a DatabaseServerless to populate databaseId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + serviceAccountRef: + description: References to ServiceAccount in iam to populate members. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + serviceAccountSelector: + description: Selector for a list of ServiceAccount in iam to populate + members. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: DatabaseIAMBindingStatus defines the observed state of DatabaseIAMBinding. + properties: + atProvider: + properties: + databaseId: + description: The Managed Service YDB instance Database ID to apply + a binding to. + type: string + id: + type: string + members: + description: 'Identities that will be granted the privilege in + role. Each entry can have one of the following values:' + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databaseserverlesses.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databaseserverlesses.yaml new file mode 100644 index 0000000..beeec91 --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_databaseserverlesses.yaml @@ -0,0 +1,632 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: databaseserverlesses.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DatabaseServerless + listKind: DatabaseServerlessList + plural: databaseserverlesses + singular: databaseserverless + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DatabaseServerless is the Schema for the DatabaseServerlesss + API. Manages Yandex Database serverless cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatabaseServerlessSpec defines the desired state of DatabaseServerless + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deletionProtection: + description: Inhibits deletion of the database. 
Can be either + true or false + type: boolean + description: + description: A description for the Yandex Database serverless + cluster. + type: string + folderId: + description: ID of the folder that the Yandex Database serverless + cluster belongs to. It will be deduced from provider configuration + if not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Database serverless cluster. + type: object + x-kubernetes-map-type: granular + locationId: + description: Location ID for the Yandex Database serverless cluster. + type: string + name: + description: Name for the Yandex Database serverless cluster. + type: string + serverlessDatabase: + items: + properties: + enableThrottlingRcuLimit: + type: boolean + provisionedRcuLimit: + type: number + storageSizeLimit: + type: number + throttlingRcuLimit: + type: number + type: object + type: array + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deletionProtection: + description: Inhibits deletion of the database. Can be either + true or false + type: boolean + description: + description: A description for the Yandex Database serverless + cluster. + type: string + folderId: + description: ID of the folder that the Yandex Database serverless + cluster belongs to. It will be deduced from provider configuration + if not set explicitly. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Database serverless cluster. + type: object + x-kubernetes-map-type: granular + locationId: + description: Location ID for the Yandex Database serverless cluster. + type: string + name: + description: Name for the Yandex Database serverless cluster. 
+ type: string + serverlessDatabase: + items: + properties: + enableThrottlingRcuLimit: + type: boolean + provisionedRcuLimit: + type: number + storageSizeLimit: + type: number + throttlingRcuLimit: + type: number + type: object + type: array + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: DatabaseServerlessStatus defines the observed state of DatabaseServerless. + properties: + atProvider: + properties: + createdAt: + description: The Yandex Database serverless cluster creation timestamp. + type: string + databasePath: + description: Full database path of the Yandex Database serverless + cluster. Useful for SDK configuration. + type: string + deletionProtection: + description: Inhibits deletion of the database. Can be either + true or false + type: boolean + description: + description: A description for the Yandex Database serverless + cluster. + type: string + documentApiEndpoint: + description: Document API endpoint of the Yandex Database serverless + cluster. + type: string + folderId: + description: ID of the folder that the Yandex Database serverless + cluster belongs to. It will be deduced from provider configuration + if not set explicitly. + type: string + id: + description: ID of the Yandex Database serverless cluster. 
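+ # For reference, a sketch of the two secret-publishing paths defined in the
+ # spec above (all names are placeholders). Per the descriptions, both can
+ # currently be set independently and are written to without affecting each other:
+ #   spec:
+ #     writeConnectionSecretToRef:
+ #       name: example-db-conn
+ #       namespace: crossplane-system
+ #     publishConnectionDetailsTo:
+ #       name: example-db-conn
+ #       configRef:
+ #         name: default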
+ type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Database serverless cluster. + type: object + x-kubernetes-map-type: granular + locationId: + description: Location ID for the Yandex Database serverless cluster. + type: string + name: + description: Name for the Yandex Database serverless cluster. + type: string + serverlessDatabase: + items: + properties: + enableThrottlingRcuLimit: + type: boolean + provisionedRcuLimit: + type: number + storageSizeLimit: + type: number + throttlingRcuLimit: + type: number + type: object + type: array + sleepAfter: + type: number + status: + description: Status of the Yandex Database serverless cluster. + type: string + tlsEnabled: + description: Whether TLS is enabled for the Yandex Database serverless + cluster. Useful for SDK configuration. + type: boolean + ydbApiEndpoint: + description: API endpoint of the Yandex Database serverless cluster. + Useful for SDK configuration. + type: string + ydbFullEndpoint: + description: Full endpoint of the Yandex Database serverless cluster. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
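+ # A minimal DatabaseServerless manifest sketched from the schema above. All
+ # names and values are placeholders, and the v1alpha1 API version is assumed
+ # to match the sibling ydb CRDs in this patch:
+ #   apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: DatabaseServerless
+ #   metadata:
+ #     name: example-serverless-db
+ #   spec:
+ #     forProvider:
+ #       name: example-db              # required whenever Create/Update is allowed (CEL rule above)
+ #       description: Example serverless YDB cluster
+ #       deletionProtection: false
+ #       folderIdRef:
+ #         name: example-folder        # resolves folderId from a resourcemanager Folder
+ #       serverlessDatabase:
+ #         - provisionedRcuLimit: 10
+ #           storageSizeLimit: 50
+ #     providerConfigRef:
+ #       name: default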
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tablechangefeeds.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tablechangefeeds.yaml new file mode 100644 index 0000000..4c3f2b1 --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tablechangefeeds.yaml @@ -0,0 +1,480 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: tablechangefeeds.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: TableChangefeed + listKind: TableChangefeedList + plural: tablechangefeeds + singular: tablechangefeed + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TableChangefeed is the Schema for the TableChangefeeds API. Manages + Yandex Database dedicated cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableChangefeedSpec defines the desired state of TableChangefeed + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + connectionString: + description: Connection string, conflicts with table_id + type: string + consumer: + description: Changefeed consumers - named entities for reading + data from the topic. + items: + properties: + important: + type: boolean + name: + description: ': Changefeed name.' 
+ type: string + startingMessageTimestampMs: + description: Timestamp in the UNIX timestamp format, from + which the consumer will start reading data + type: number + supportedCodecs: + description: Supported data encodings + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + format: + description: ': Changefeed format. Only JSON format is available.' + type: string + mode: + description: ': Changefeed operating mode. The available changefeed + operating modes are presented in the documentation.' + type: string + name: + description: ': Changefeed name.' + type: string + retentionPeriod: + description: Time of data retention in the topic, ISO 8601 format + type: string + tableId: + description: ': ID of the table for which we create the changefeed.' + type: string + tablePath: + description: Table path + type: string + virtualTimestamps: + description: Use virtual timestamps + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + connectionString: + description: Connection string, conflicts with table_id + type: string + consumer: + description: Changefeed consumers - named entities for reading + data from the topic. + items: + properties: + important: + type: boolean + name: + description: ': Changefeed name.' + type: string + startingMessageTimestampMs: + description: Timestamp in the UNIX timestamp format, from + which the consumer will start reading data + type: number + supportedCodecs: + description: Supported data encodings + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + format: + description: ': Changefeed format. Only JSON format is available.' + type: string + mode: + description: ': Changefeed operating mode. The available changefeed + operating modes are presented in the documentation.' + type: string + name: + description: ': Changefeed name.' + type: string + retentionPeriod: + description: Time of data retention in the topic, ISO 8601 format + type: string + tableId: + description: ': ID of the table for which we create the changefeed.' + type: string + tablePath: + description: Table path + type: string + virtualTimestamps: + description: Use virtual timestamps + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
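+ # A hypothetical TableChangefeed manifest drawn from the forProvider schema
+ # above. The mode and endpoint values are placeholders; the schema defers the
+ # list of valid modes to the documentation:
+ #   apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: TableChangefeed
+ #   metadata:
+ #     name: example-changefeed
+ #   spec:
+ #     forProvider:
+ #       name: example-feed            # required (see the CEL validations below)
+ #       format: JSON                  # required; only JSON format is available
+ #       mode: NEW_IMAGE               # required; placeholder mode value
+ #       tablePath: example/users
+ #       connectionString: grpcs://ydb.example.net/?database=/example   # placeholder; conflicts with tableId
+ #       retentionPeriod: PT24H        # ISO 8601 duration
+ #       virtualTimestamps: false
+ #       consumer:
+ #         - name: example-consumer
+ #           important: false
+ #           startingMessageTimestampMs: 0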
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.format is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.format) + || (has(self.initProvider) && has(self.initProvider.format))' + - message: spec.forProvider.mode is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.mode) + || (has(self.initProvider) && has(self.initProvider.mode))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TableChangefeedStatus defines the observed state of TableChangefeed. + properties: + atProvider: + properties: + connectionString: + description: Connection string, conflicts with table_id + type: string + consumer: + description: Changefeed consumers - named entities for reading + data from the topic. + items: + properties: + important: + type: boolean + name: + description: ': Changefeed name.' + type: string + startingMessageTimestampMs: + description: Timestamp in the UNIX timestamp format, from + which the consumer will start reading data + type: number + supportedCodecs: + description: Supported data encodings + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + format: + description: ': Changefeed format. Only JSON format is available.' + type: string + id: + type: string + mode: + description: ': Changefeed operating mode. The available changefeed + operating modes are presented in the documentation.' + type: string + name: + description: ': Changefeed name.' + type: string + retentionPeriod: + description: Time of data retention in the topic, ISO 8601 format + type: string + tableId: + description: ': ID of the table for which we create the changefeed.' 
+ type: string + tablePath: + description: Table path + type: string + virtualTimestamps: + description: Use virtual timestamps + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tableindices.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tableindices.yaml new file mode 100644 index 0000000..651449e --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tableindices.yaml @@ -0,0 +1,414 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: tableindices.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: TableIndex + listKind: TableIndexList + plural: tableindices + singular: tableindex + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TableIndex is the Schema for the TableIndexs API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableIndexSpec defines the desired state of TableIndex + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + columns: + description: (List of String) + items: + type: string + type: array + connectionString: + description: (String) + type: string + cover: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + tableId: + description: (String) + type: string + tablePath: + description: (String) + type: string + type: + description: (String) + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + columns: + description: (List of String) + items: + type: string + type: array + connectionString: + description: (String) + type: string + cover: + description: (List of String) + items: + type: string + type: array + name: + description: (String) + type: string + tableId: + description: (String) + type: string + tablePath: + description: (String) + type: string + type: + description: (String) + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
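+ # A hypothetical TableIndex manifest based on the schema above. The type value
+ # is a placeholder, since the schema describes it only as (String) with no enum:
+ #   apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: TableIndex
+ #   metadata:
+ #     name: example-index
+ #   spec:
+ #     forProvider:
+ #       name: idx-by-email            # required (see the CEL validations below)
+ #       type: global_sync             # required; placeholder index type
+ #       columns:                      # required
+ #         - email
+ #       cover:
+ #         - created_at
+ #       tablePath: example/users
+ #       connectionString: grpcs://ydb.example.net/?database=/example   # placeholder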
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.columns is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.columns) + || (has(self.initProvider) && has(self.initProvider.columns))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: TableIndexStatus defines the observed state of TableIndex. + properties: + atProvider: + properties: + columns: + description: (List of String) + items: + type: string + type: array + connectionString: + description: (String) + type: string + cover: + description: (List of String) + items: + type: string + type: array + id: + description: (String) The ID of this resource. + type: string + name: + description: (String) + type: string + tableId: + description: (String) + type: string + tablePath: + description: (String) + type: string + type: + description: (String) + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tables.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tables.yaml new file mode 100644 index 0000000..0506b16 --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_tables.yaml @@ -0,0 +1,800 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: tables.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Table + listKind: TableList + plural: tables + singular: table + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Table is the Schema for the Tables API. Manages Yandex Database + dedicated cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableSpec defines the desired state of Table + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. 
Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + attributes: + additionalProperties: + type: string + description: A map of table attributes. + type: object + x-kubernetes-map-type: granular + column: + description: A list of column configuration options. The structure + is documented below. + items: + properties: + family: + description: Column group + type: string + name: + description: Column name + type: string + notNull: + description: 'A column cannot have the NULL data type. ( + Default: false )' + type: boolean + type: + description: Column data type. YQL data types are used. + type: string + type: object + type: array + connectionString: + description: Connection string for database. + type: string + connectionStringRef: + description: Reference to a DatabaseServerless in ydb to populate + connectionString. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionStringSelector: + description: Selector for a DatabaseServerless in ydb to populate + connectionString. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + family: + description: A list of column group configuration options. The + structure is documented below. + items: + properties: + compression: + description: 'Data codec (acceptable values: off, lz4).' 
+ type: string + data: + description: 'Type of storage device for column data in + this group (acceptable values: ssd, rot (from HDD spindle + rotation)).' + type: string + name: + description: Column family name + type: string + type: object + type: array + keyBloomFilter: + description: Use the Bloom filter for the primary key + type: boolean + partitioningSettings: + description: Table partitioning settings. The structure is documented + below. + items: + properties: + autoPartitioningByLoad: + type: boolean + autoPartitioningBySizeEnabled: + type: boolean + autoPartitioningMaxPartitionsCount: + type: number + autoPartitioningMinPartitionsCount: + type: number + autoPartitioningPartitionSizeMb: + type: number + partitionAtKeys: + items: + properties: + keys: + items: + type: string + type: array + type: object + type: array + uniformPartitions: + type: number + type: object + type: array + path: + description: Table path. + type: string + primaryKey: + description: A list of table columns to be used as the primary key. + items: + type: string + type: array + readReplicasSettings: + description: Read replication settings + type: string + ttl: + description: TTL settings. The structure is documented below. + items: + properties: + columnName: + description: Column name for TTL + type: string + expireInterval: + description: Interval in the ISO 8601 format + type: string + unit: + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + attributes: + additionalProperties: + type: string + description: A map of table attributes. + type: object + x-kubernetes-map-type: granular + column: + description: A list of column configuration options. The structure + is documented below. + items: + properties: + family: + description: Column group + type: string + name: + description: Column name + type: string + notNull: + description: 'A column cannot have the NULL data type. ( + Default: false )' + type: boolean + type: + description: Column data type. YQL data types are used. + type: string + type: object + type: array + connectionString: + description: Connection string for database. + type: string + connectionStringRef: + description: Reference to a DatabaseServerless in ydb to populate + connectionString. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved.
+ The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionStringSelector: + description: Selector for a DatabaseServerless in ydb to populate + connectionString. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + family: + description: A list of column group configuration options. The + structure is documented below. + items: + properties: + compression: + description: 'Data codec (acceptable values: off, lz4).' + type: string + data: + description: 'Type of storage device for column data in + this group (acceptable values: ssd, rot (from HDD spindle + rotation)).' + type: string + name: + description: Column family name + type: string + type: object + type: array + keyBloomFilter: + description: Use the Bloom filter for the primary key + type: boolean + partitioningSettings: + description: Table partitioning settings. The structure is documented + below. + items: + properties: + autoPartitioningByLoad: + type: boolean + autoPartitioningBySizeEnabled: + type: boolean + autoPartitioningMaxPartitionsCount: + type: number + autoPartitioningMinPartitionsCount: + type: number + autoPartitioningPartitionSizeMb: + type: number + partitionAtKeys: + items: + properties: + keys: + items: + type: string + type: array + type: object + type: array + uniformPartitions: + type: number + type: object + type: array + path: + description: Table path. + type: string + primaryKey: + description: A list of table columns to be used as the primary key. + items: + type: string + type: array + readReplicasSettings: + description: Read replication settings + type: string + ttl: + description: TTL settings. The structure is documented below. + items: + properties: + columnName: + description: Column name for TTL + type: string + expireInterval: + description: Interval in the ISO 8601 format + type: string + unit: + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release.
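+ # A hypothetical Table manifest combining the forProvider fields above. Values
+ # are placeholders; column types use YQL names. Note connectionStringRef, which
+ # resolves connectionString from a DatabaseServerless object:
+ #   apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1
+ #   kind: Table
+ #   metadata:
+ #     name: example-table
+ #   spec:
+ #     forProvider:
+ #       path: example/users           # required
+ #       connectionStringRef:
+ #         name: example-serverless-db
+ #       column:                       # required
+ #         - name: id
+ #           type: Uint64
+ #           notNull: true
+ #         - name: created_at
+ #           type: Timestamp
+ #       primaryKey:                   # required
+ #         - id
+ #       ttl:
+ #         - columnName: created_at
+ #           expireInterval: P30D      # ISO 8601 interval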
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.column is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.column) + || (has(self.initProvider) && has(self.initProvider.column))' + - message: spec.forProvider.path is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.path) + || (has(self.initProvider) && has(self.initProvider.path))' + - message: spec.forProvider.primaryKey is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.primaryKey) + || (has(self.initProvider) && has(self.initProvider.primaryKey))' + status: + description: TableStatus defines the observed state of Table. + properties: + atProvider: + properties: + attributes: + additionalProperties: + type: string + description: A map of table attributes. + type: object + x-kubernetes-map-type: granular + column: + description: A list of column configuration options. The structure + is documented below. + items: + properties: + family: + description: Column group + type: string + name: + description: Column name + type: string + notNull: + description: 'A column cannot have the NULL data type. ( + Default: false )' + type: boolean + type: + description: Column data type. YQL data types are used. + type: string + type: object + type: array + connectionString: + description: Connection string for database. + type: string + family: + description: A list of column group configuration options. The + structure is documented below. + items: + properties: + compression: + description: 'Data codec (acceptable values: off, lz4).' + type: string + data: + description: 'Type of storage device for column data in + this group (acceptable values: ssd, rot (from HDD spindle + rotation)).' 
+ type: string + name: + description: Column family name + type: string + type: object + type: array + id: + type: string + keyBloomFilter: + description: Use the Bloom filter for the primary key + type: boolean + partitioningSettings: + description: Table partitioning settings. The structure is documented + below. + items: + properties: + autoPartitioningByLoad: + type: boolean + autoPartitioningBySizeEnabled: + type: boolean + autoPartitioningMaxPartitionsCount: + type: number + autoPartitioningMinPartitionsCount: + type: number + autoPartitioningPartitionSizeMb: + type: number + partitionAtKeys: + items: + properties: + keys: + items: + type: string + type: array + type: object + type: array + uniformPartitions: + type: number + type: object + type: array + path: + description: Table path. + type: string + primaryKey: + description: A list of table columns to be used as the primary key. + items: + type: string + type: array + readReplicasSettings: + description: Read replication settings + type: string + ttl: + description: TTL settings. The structure is documented below. + items: + properties: + columnName: + description: Column name for TTL + type: string + expireInterval: + description: Interval in the ISO 8601 format + type: string + unit: + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention.
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ydb.yandex-cloud.upjet.crossplane.io_topics.yaml b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_topics.yaml new file mode 100644 index 0000000..3ea1eef --- /dev/null +++ b/package/crds/ydb.yandex-cloud.upjet.crossplane.io_topics.yaml @@ -0,0 +1,678 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: topics.ydb.yandex-cloud.upjet.crossplane.io +spec: + group: ydb.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Topic + listKind: TopicList + plural: topics + singular: topic + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Topic is the Schema for the Topics API. Get information about + Yandex YDB Topics. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TopicSpec defines the desired state of Topic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external resource + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + consumer: + description: 'Topic Readers. Types: array[consumer], optional. + Default value: null.' + items: + properties: + important: + description: 'Defines an important consumer. No data will + be deleted from the topic until all the important consumers + read them. Value type: boolean, default value: false.' + type: boolean + name: + description: 'Topic name. Type: string, required. Default + value: "".' + type: string + startingMessageTimestampMs: + description: 'Timestamp in UNIX timestamp format from which + the reader will start reading data. Type: integer, optional. + Default value: 0.'
+ type: number + supportedCodecs: + description: 'Supported data encodings. Types: array[string]. + Default value: ["gzip", "raw", "zstd"].' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + databaseEndpoint: + description: 'YDB database endpoint. Types: string, required. + Default value: "".' + type: string + databaseEndpointRef: + description: Reference to a DatabaseServerless in ydb to populate + databaseEndpoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseEndpointSelector: + description: Selector for a DatabaseServerless in ydb to populate + databaseEndpoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + type: string + meteringMode: + description: 'Resource metering mode (reserved_capacity - based + on the allocated resources or request_units - based on actual + usage). This option applies to topics in serverless databases. + Value type: String.' + type: string + name: + description: 'Topic name. Type: string, required. Default value: + "".' + type: string + partitionWriteSpeedKbps: + description: 'Maximum allowed write speed per partition. If a + write speed for a given partition exceeds this value, the write + speed will be capped. Value type: integer, default value: 1024 + (1MB).' + type: number + partitionsCount: + description: 'Number of partitions. Types: integer, optional. + Default value: 2.' + type: number + retentionPeriodHours: + type: number + retentionStorageMb: + type: number + supportedCodecs: + description: 'Supported data encodings. Types: array[string]. + Default value: ["gzip", "raw", "zstd"].' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + consumer: + description: 'Topic Readers. Types: array[consumer], optional. + Default value: null.' + items: + properties: + important: + description: 'Defines an important consumer. No data will + be deleted from the topic until all the important consumers + read them. Value type: boolean, default value: false.' + type: boolean + name: + description: 'Topic name. Type: string, required. Default + value: "".' + type: string + startingMessageTimestampMs: + description: 'Timestamp in UNIX timestamp format from which + the reader will start reading data. Type: integer, optional. + Default value: 0.' + type: number + supportedCodecs: + description: 'Supported data encodings. Types: array[string]. + Default value: ["gzip", "raw", "zstd"].' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + databaseEndpoint: + description: 'YDB database endpoint. Types: string, required. + Default value: "".' + type: string + databaseEndpointRef: + description: Reference to a DatabaseServerless in ydb to populate + databaseEndpoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseEndpointSelector: + description: Selector for a DatabaseServerless in ydb to populate + databaseEndpoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + type: string + meteringMode: + description: 'Resource metering mode (reserved_capacity - based + on the allocated resources or request_units - based on actual + usage). This option applies to topics in serverless databases. + Value type: String.' + type: string + name: + description: 'Topic name. Type: string, required. Default value: + "".' + type: string + partitionWriteSpeedKbps: + description: 'Maximum allowed write speed per partition. If a + write speed for a given partition exceeds this value, the write + speed will be capped. Value type: integer, default value: 1024 + (1MB).' + type: number + partitionsCount: + description: 'Number of partitions. Types: integer, optional. + Default value: 2.' + type: number + retentionPeriodHours: + type: number + retentionStorageMb: + type: number + supportedCodecs: + description: 'Supported data encodings. Types: array[string]. + Default value: ["gzip", "raw", "zstd"].' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to the Secret Store implementation for other store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to the Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret.
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TopicStatus defines the observed state of Topic. + properties: + atProvider: + properties: + consumer: + description: 'Topic Readers. Types: array[consumer], optional. + Default value: null.' + items: + properties: + important: + description: 'Defines an important consumer. No data will + be deleted from the topic until all the important consumers + read them. Value type: boolean, default value: false.' + type: boolean + name: + description: 'Topic name. Type: string, required. Default + value: "".' + type: string + startingMessageTimestampMs: + description: 'Timestamp in UNIX timestamp format from which + the reader will start reading data. Type: integer, optional. + Default value: 0.' + type: number + supportedCodecs: + description: 'Supported data encodings. Types: array[string]. + Default value: ["gzip", "raw", "zstd"].' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + databaseEndpoint: + description: 'YDB database endpoint. Types: string, required. + Default value: "".' + type: string + description: + type: string + id: + type: string + meteringMode: + description: 'Resource metering mode (reserved_capacity - based + on the allocated resources or request_units - based on actual + usage). This option applies to topics in serverless databases. + Value type: String.' + type: string + name: + description: 'Topic name. Type: string, required. Default value: + "".' + type: string + partitionWriteSpeedKbps: + description: 'Maximum allowed write speed per partition. If a + write speed for a given partition exceeds this value, the write + speed will be capped. Value type: integer, default value: 1024 + (1MB).' + type: number + partitionsCount: + description: 'Number of partitions. Types: integer, optional. + Default value: 2.' + type: number + retentionPeriodHours: + type: number + retentionStorageMb: + type: number + supportedCodecs: + description: 'Supported data encodings. Types: array[string]. + Default value: ["gzip", "raw", "zstd"].' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
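Reviewer note: the two manifests below are illustrative sketches, not part of the generated diff. First, a minimal Table object exercising the Table CRD whose tail appears above. Only spec.forProvider.column, spec.forProvider.path, and spec.forProvider.primaryKey are enforced by the CEL validation rules; the metadata name, table path, column names, and YQL types here are hypothetical, and the apiVersion is assumed to be the same ydb.yandex-cloud.upjet.crossplane.io/v1alpha1 group and version used by the Topic CRD.

apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1   # assumed group/version
kind: Table
metadata:
  name: example-table                 # hypothetical name
spec:
  forProvider:
    path: some/dir/example-table      # required by the CEL rule above; hypothetical path
    primaryKey:                       # required by the CEL rule above
      - id
    column:                           # required by the CEL rule above; YQL data types
      - name: id
        type: Uint64
        notNull: true
      - name: payload
        type: Utf8
  providerConfigRef:
    name: default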
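Second, a minimal Topic object matching the topics.yaml CRD added above. The CEL validation only requires spec.forProvider.name; databaseEndpoint can be set directly or resolved through databaseEndpointRef, whose target name here is hypothetical. The partitionsCount and supportedCodecs values echo the defaults stated in the field descriptions (2 partitions; gzip, raw, zstd).

apiVersion: ydb.yandex-cloud.upjet.crossplane.io/v1alpha1
kind: Topic
metadata:
  name: example-topic                 # hypothetical name
spec:
  forProvider:
    name: example-topic               # the only parameter required by the CEL rule
    databaseEndpointRef:              # resolves databaseEndpoint from a DatabaseServerless
      name: example-database          # hypothetical referenced object
    partitionsCount: 2                # default per the description above
    supportedCodecs:                  # default set per the description above
      - gzip
      - raw
      - zstd
    consumer:
      - name: example-consumer        # hypothetical reader
        important: false
  providerConfigRef:
    name: default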